github.com/nats-io/nats-server/v2@v2.11.0-preview.2/server/jetstream_cluster_1_test.go (about) 1 // Copyright 2020-2022 The NATS Authors 2 // Licensed under the Apache License, Version 2.0 (the "License"); 3 // you may not use this file except in compliance with the License. 4 // You may obtain a copy of the License at 5 // 6 // http://www.apache.org/licenses/LICENSE-2.0 7 // 8 // Unless required by applicable law or agreed to in writing, software 9 // distributed under the License is distributed on an "AS IS" BASIS, 10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 // See the License for the specific language governing permissions and 12 // limitations under the License. 13 14 //go:build !skip_js_tests && !skip_js_cluster_tests 15 // +build !skip_js_tests,!skip_js_cluster_tests 16 17 package server 18 19 import ( 20 "bytes" 21 "context" 22 crand "crypto/rand" 23 "encoding/json" 24 "fmt" 25 "math/rand" 26 "os" 27 "path/filepath" 28 "reflect" 29 "strings" 30 "sync" 31 "sync/atomic" 32 "testing" 33 "time" 34 35 "github.com/nats-io/jwt/v2" 36 "github.com/nats-io/nats.go" 37 ) 38 39 func TestJetStreamClusterConfig(t *testing.T) { 40 conf := createConfFile(t, []byte(` 41 listen: 127.0.0.1:-1 42 jetstream: {max_mem_store: 16GB, max_file_store: 10TB, store_dir: '%s'} 43 cluster { listen: 127.0.0.1:-1 } 44 `)) 45 46 check := func(errStr string) { 47 t.Helper() 48 opts, err := ProcessConfigFile(conf) 49 if err != nil { 50 t.Fatalf("Unexpected error: %v", err) 51 } 52 if _, err := NewServer(opts); err == nil || !strings.Contains(err.Error(), errStr) { 53 t.Fatalf("Expected an error of `%s`, got `%v`", errStr, err) 54 } 55 } 56 57 check("requires `server_name`") 58 59 conf = createConfFile(t, []byte(` 60 listen: 127.0.0.1:-1 61 server_name: "TEST" 62 jetstream: {max_mem_store: 16GB, max_file_store: 10TB, store_dir: '%s'} 63 cluster { listen: 127.0.0.1:-1 } 64 `)) 65 66 check("requires `cluster.name`") 67 } 68 69 func 
TestJetStreamClusterLeader(t *testing.T) { 70 c := createJetStreamClusterExplicit(t, "JSC", 3) 71 defer c.shutdown() 72 73 // Kill our current leader and force an election. 74 c.leader().Shutdown() 75 c.waitOnLeader() 76 77 // Now killing our current leader should leave us leaderless. 78 c.leader().Shutdown() 79 c.expectNoLeader() 80 } 81 82 func TestJetStreamClusterExpand(t *testing.T) { 83 c := createJetStreamClusterExplicit(t, "JSC", 2) 84 defer c.shutdown() 85 86 c.addInNewServer() 87 c.waitOnPeerCount(3) 88 } 89 90 func TestJetStreamClusterAccountInfo(t *testing.T) { 91 c := createJetStreamClusterExplicit(t, "JSC", 3) 92 defer c.shutdown() 93 94 nc := clientConnectToServer(t, c.randomServer()) 95 defer nc.Close() 96 97 reply := nats.NewInbox() 98 sub, _ := nc.SubscribeSync(reply) 99 100 if err := nc.PublishRequest(JSApiAccountInfo, reply, nil); err != nil { 101 t.Fatalf("Unexpected error: %v", err) 102 } 103 checkSubsPending(t, sub, 1) 104 resp, _ := sub.NextMsg(0) 105 106 var info JSApiAccountInfoResponse 107 if err := json.Unmarshal(resp.Data, &info); err != nil { 108 t.Fatalf("Unexpected error: %v", err) 109 } 110 if info.JetStreamAccountStats == nil || info.Error != nil { 111 t.Fatalf("Did not receive correct response: %+v", info.Error) 112 } 113 // Make sure we only got 1 response. 114 // Technically this will always work since its a singelton service export. 
115 if nmsgs, _, _ := sub.Pending(); nmsgs > 0 { 116 t.Fatalf("Expected only a single response, got %d more", nmsgs) 117 } 118 } 119 120 func TestJetStreamClusterStreamLimitWithAccountDefaults(t *testing.T) { 121 // 2MB memory, 8MB disk 122 c := createJetStreamClusterWithTemplate(t, jsClusterLimitsTempl, "R3L", 3) 123 defer c.shutdown() 124 125 // Client based API 126 s := c.randomNonLeader() 127 nc, js := jsClientConnect(t, s) 128 defer nc.Close() 129 130 _, err := js.AddStream(&nats.StreamConfig{ 131 Name: "TEST", 132 Subjects: []string{"foo", "bar"}, 133 Replicas: 2, 134 MaxBytes: 4 * 1024 * 1024, 135 }) 136 if err != nil { 137 t.Fatalf("Unexpected error: %v", err) 138 } 139 140 _, err = js.AddStream(&nats.StreamConfig{ 141 Name: "TEST2", 142 Replicas: 2, 143 MaxBytes: 15 * 1024 * 1024, 144 }) 145 require_Contains(t, err.Error(), "no suitable peers for placement", "insufficient storage") 146 } 147 148 func TestJetStreamClusterInfoRaftGroup(t *testing.T) { 149 c := createJetStreamClusterExplicit(t, "R1S", 3) 150 defer c.shutdown() 151 152 s := c.randomNonLeader() 153 nc, js := jsClientConnect(t, s) 154 defer nc.Close() 155 156 acc := s.GlobalAccount() 157 158 _, err := js.AddStream(&nats.StreamConfig{ 159 Name: "TEST", 160 Subjects: []string{"foo", "bar"}, 161 Storage: nats.FileStorage, 162 Replicas: 3, 163 }) 164 require_NoError(t, err) 165 166 nfoResp, err := nc.Request("$JS.API.STREAM.INFO.TEST", nil, time.Second) 167 require_NoError(t, err) 168 169 var si StreamInfo 170 err = json.Unmarshal(nfoResp.Data, &si) 171 require_NoError(t, err) 172 173 if si.Cluster == nil { 174 t.Fatalf("Expected cluster info, got none") 175 } 176 177 stream, err := acc.lookupStream("TEST") 178 require_NoError(t, err) 179 180 if si.Cluster.RaftGroup != stream.raftGroup().Name { 181 t.Fatalf("Expected raft group %q to equal %q", si.Cluster.RaftGroup, stream.raftGroup().Name) 182 } 183 184 var sscfg StreamConfig 185 rCfgData, err := os.ReadFile(filepath.Join(s.opts.StoreDir, 
"jetstream", "$SYS", "_js_", stream.raftGroup().Name, "meta.inf")) 186 require_NoError(t, err) 187 err = json.Unmarshal(rCfgData, &sscfg) 188 require_NoError(t, err) 189 if !reflect.DeepEqual(sscfg.Metadata, map[string]string{"account": "$G", "stream": "TEST", "type": "stream"}) { 190 t.Fatalf("Invalid raft stream metadata: %v", sscfg.Metadata) 191 } 192 193 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "DURABLE", Replicas: 3}) 194 require_NoError(t, err) 195 196 consumer := stream.lookupConsumer("DURABLE") 197 198 var ci ConsumerInfo 199 nfoResp, err = nc.Request("$JS.API.CONSUMER.INFO.TEST.DURABLE", nil, time.Second) 200 require_NoError(t, err) 201 202 var cscfg ConsumerConfig 203 rCfgData, err = os.ReadFile(filepath.Join(s.opts.StoreDir, "jetstream", "$SYS", "_js_", consumer.raftGroup().Name, "meta.inf")) 204 require_NoError(t, err) 205 err = json.Unmarshal(rCfgData, &cscfg) 206 require_NoError(t, err) 207 if !reflect.DeepEqual(cscfg.Metadata, map[string]string{"account": "$G", "consumer": "DURABLE", "stream": "TEST", "type": "consumer"}) { 208 t.Fatalf("Invalid raft stream metadata: %v", cscfg.Metadata) 209 } 210 211 err = json.Unmarshal(nfoResp.Data, &ci) 212 require_NoError(t, err) 213 214 if ci.Cluster.RaftGroup != consumer.raftGroup().Name { 215 t.Fatalf("Expected raft group %q to equal %q", ci.Cluster.RaftGroup, consumer.raftGroup().Name) 216 } 217 } 218 219 func TestJetStreamClusterSingleReplicaStreams(t *testing.T) { 220 c := createJetStreamClusterExplicit(t, "R1S", 3) 221 defer c.shutdown() 222 223 // Client based API 224 s := c.randomNonLeader() 225 nc, js := jsClientConnect(t, s) 226 defer nc.Close() 227 228 si, err := js.AddStream(&nats.StreamConfig{ 229 Name: "TEST", 230 Subjects: []string{"foo", "bar"}, 231 }) 232 if err != nil { 233 t.Fatalf("Unexpected error: %v", err) 234 } 235 if si.Cluster == nil { 236 t.Fatalf("Expected si to have cluster info") 237 } 238 // Send in 10 messages. 
239 msg, toSend := []byte("Hello JS Clustering"), 10 240 for i := 0; i < toSend; i++ { 241 if _, err = js.Publish("foo", msg); err != nil { 242 t.Fatalf("Unexpected publish error: %v", err) 243 } 244 } 245 // Now grab info for this stream. 246 si, err = js.StreamInfo("TEST") 247 if err != nil { 248 t.Fatalf("Unexpected error: %v", err) 249 } 250 if si == nil || si.Config.Name != "TEST" { 251 t.Fatalf("StreamInfo is not correct %+v", si) 252 } 253 // Check active state as well, shows that the owner answered. 254 if si.State.Msgs != uint64(toSend) { 255 t.Fatalf("Expected %d msgs, got bad state: %+v", toSend, si.State) 256 } 257 // Now create a consumer. This should be pinned to same server that our stream was allocated to. 258 // First do a normal sub. 259 sub, err := js.SubscribeSync("foo") 260 if err != nil { 261 t.Fatalf("Unexpected error: %v", err) 262 } 263 264 checkSubsPending(t, sub, toSend) 265 266 // Now create a consumer as well. 267 ci, err := js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy}) 268 if err != nil { 269 t.Fatalf("Unexpected error: %v", err) 270 } 271 if ci == nil || ci.Name != "dlc" || ci.Stream != "TEST" { 272 t.Fatalf("ConsumerInfo is not correct %+v", ci) 273 } 274 275 // Now make sure that if we kill and restart the server that this stream and consumer return. 276 sl := c.streamLeader("$G", "TEST") 277 sl.Shutdown() 278 c.restartServer(sl) 279 280 c.waitOnStreamLeader("$G", "TEST") 281 si, err = js.StreamInfo("TEST") 282 if err != nil { 283 t.Fatalf("Unexpected error: %v", err) 284 } 285 if si == nil || si.Config.Name != "TEST" { 286 t.Fatalf("StreamInfo is not correct %+v", si) 287 } 288 // Now durable consumer. 
289 c.waitOnConsumerLeader("$G", "TEST", "dlc") 290 if _, err = js.ConsumerInfo("TEST", "dlc"); err != nil { 291 t.Fatalf("Unexpected error: %v", err) 292 } 293 } 294 295 func TestJetStreamClusterMultiReplicaStreams(t *testing.T) { 296 c := createJetStreamClusterExplicit(t, "RNS", 5) 297 defer c.shutdown() 298 299 // Client based API 300 s := c.randomServer() 301 nc, js := jsClientConnect(t, s) 302 defer nc.Close() 303 304 _, err := js.AddStream(&nats.StreamConfig{ 305 Name: "TEST", 306 Subjects: []string{"foo", "bar"}, 307 Replicas: 3, 308 }) 309 if err != nil { 310 t.Fatalf("Unexpected error: %v", err) 311 } 312 // Send in 10 messages. 313 msg, toSend := []byte("Hello JS Clustering"), 10 314 for i := 0; i < toSend; i++ { 315 if _, err = js.Publish("foo", msg); err != nil { 316 t.Fatalf("Unexpected publish error: %v", err) 317 } 318 } 319 320 // Now grab info for this stream. 321 si, err := js.StreamInfo("TEST") 322 if err != nil { 323 t.Fatalf("Unexpected error: %v", err) 324 } 325 if si == nil || si.Config.Name != "TEST" { 326 t.Fatalf("StreamInfo is not correct %+v", si) 327 } 328 // Check active state as well, shows that the owner answered. 329 if si.State.Msgs != uint64(toSend) { 330 t.Fatalf("Expected %d msgs, got bad state: %+v", toSend, si.State) 331 } 332 // Now create a consumer. This should be affinitize to the same set of servers as the stream. 333 // First do a normal sub. 334 sub, err := js.SubscribeSync("foo") 335 if err != nil { 336 t.Fatalf("Unexpected error: %v", err) 337 } 338 339 checkSubsPending(t, sub, toSend) 340 341 // Now create a consumer as well. 
342 ci, err := js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy}) 343 if err != nil { 344 t.Fatalf("Unexpected error: %v", err) 345 } 346 if ci == nil || ci.Name != "dlc" || ci.Stream != "TEST" || ci.NumPending != uint64(toSend) { 347 t.Fatalf("ConsumerInfo is not correct %+v", ci) 348 } 349 } 350 351 func TestJetStreamClusterMultiReplicaStreamsDefaultFileMem(t *testing.T) { 352 const testConfig = ` 353 listen: 127.0.0.1:-1 354 server_name: %s 355 jetstream: {store_dir: '%s'} 356 357 cluster { 358 name: %s 359 listen: 127.0.0.1:%d 360 routes = [%s] 361 } 362 ` 363 c := createJetStreamClusterWithTemplate(t, testConfig, "RNS", 3) 364 defer c.shutdown() 365 366 // Client based API 367 s := c.randomServer() 368 nc, js := jsClientConnect(t, s) 369 defer nc.Close() 370 371 _, err := js.AddStream(&nats.StreamConfig{ 372 Name: "TEST", 373 Subjects: []string{"foo", "bar"}, 374 Replicas: 3, 375 MaxBytes: 1024, 376 }) 377 if err != nil { 378 t.Fatalf("Unexpected error: %v", err) 379 } 380 // Send in 10 messages. 381 msg, toSend := []byte("Hello JS Clustering"), 10 382 for i := 0; i < toSend; i++ { 383 if _, err = js.Publish("foo", msg); err != nil { 384 t.Fatalf("Unexpected publish error: %v", err) 385 } 386 } 387 388 // Now grab info for this stream. 389 si, err := js.StreamInfo("TEST") 390 if err != nil { 391 t.Fatalf("Unexpected error: %v", err) 392 } 393 if si == nil || si.Config.Name != "TEST" { 394 t.Fatalf("StreamInfo is not correct %+v", si) 395 } 396 // Check active state as well, shows that the owner answered. 397 if si.State.Msgs != uint64(toSend) { 398 t.Fatalf("Expected %d msgs, got bad state: %+v", toSend, si.State) 399 } 400 // Now create a consumer. This should be affinitize to the same set of servers as the stream. 401 // First do a normal sub. 
402 sub, err := js.SubscribeSync("foo") 403 if err != nil { 404 t.Fatalf("Unexpected error: %v", err) 405 } 406 407 checkSubsPending(t, sub, toSend) 408 409 // Now create a consumer as well. 410 ci, err := js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy}) 411 if err != nil { 412 t.Fatalf("Unexpected error: %v", err) 413 } 414 if ci == nil || ci.Name != "dlc" || ci.Stream != "TEST" || ci.NumPending != uint64(toSend) { 415 t.Fatalf("ConsumerInfo is not correct %+v", ci) 416 } 417 } 418 419 func TestJetStreamClusterMemoryStore(t *testing.T) { 420 c := createJetStreamClusterExplicit(t, "R3M", 3) 421 defer c.shutdown() 422 423 // Client based API 424 nc, js := jsClientConnect(t, c.randomServer()) 425 defer nc.Close() 426 427 _, err := js.AddStream(&nats.StreamConfig{ 428 Name: "TEST", 429 Subjects: []string{"foo", "bar"}, 430 Replicas: 3, 431 Storage: nats.MemoryStorage, 432 }) 433 if err != nil { 434 t.Fatalf("Unexpected error: %v", err) 435 } 436 437 // Send in 100 messages. 438 msg, toSend := []byte("Hello MemoryStore"), 100 439 for i := 0; i < toSend; i++ { 440 if _, err = js.Publish("foo", msg); err != nil { 441 t.Fatalf("Unexpected publish error: %v", err) 442 } 443 } 444 // Now grab info for this stream. 445 si, err := js.StreamInfo("TEST") 446 if err != nil { 447 t.Fatalf("Unexpected error: %v", err) 448 } 449 if si == nil || si.Config.Name != "TEST" { 450 t.Fatalf("StreamInfo is not correct %+v", si) 451 } 452 if si.Cluster == nil || len(si.Cluster.Replicas) != 2 { 453 t.Fatalf("Cluster info is incorrect: %+v", si.Cluster) 454 } 455 // Check active state as well, shows that the owner answered. 456 if si.State.Msgs != uint64(toSend) { 457 t.Fatalf("Expected %d msgs, got bad state: %+v", toSend, si.State) 458 } 459 // Do a normal sub. 
460 sub, err := js.SubscribeSync("foo") 461 if err != nil { 462 t.Fatalf("Unexpected error: %v", err) 463 } 464 465 checkSubsPending(t, sub, toSend) 466 } 467 468 func TestJetStreamClusterDelete(t *testing.T) { 469 c := createJetStreamClusterExplicit(t, "RNS", 3) 470 defer c.shutdown() 471 472 // Client for API requests. 473 nc, js := jsClientConnect(t, c.randomServer()) 474 defer nc.Close() 475 476 cfg := &nats.StreamConfig{ 477 Name: "C22", 478 Subjects: []string{"foo", "bar", "baz"}, 479 Replicas: 2, 480 Storage: nats.FileStorage, 481 MaxMsgs: 100, 482 } 483 if _, err := js.AddStream(cfg); err != nil { 484 t.Fatalf("Error adding stream: %v", err) 485 } 486 487 // Now create a consumer. 488 if _, err := js.AddConsumer("C22", &nats.ConsumerConfig{ 489 Durable: "dlc", 490 AckPolicy: nats.AckExplicitPolicy, 491 }); err != nil { 492 t.Fatalf("Error adding consumer: %v", err) 493 } 494 495 // Now delete the consumer. 496 if err := js.DeleteConsumer("C22", "dlc"); err != nil { 497 t.Fatalf("Error deleting consumer: %v", err) 498 } 499 500 // Now delete the stream. 501 if err := js.DeleteStream("C22"); err != nil { 502 t.Fatalf("Error deleting stream: %v", err) 503 } 504 505 // This will get the current information about usage and limits for this account. 
506 checkFor(t, time.Second, 15*time.Millisecond, func() error { 507 info, err := js.AccountInfo() 508 if err != nil { 509 return err 510 } 511 if info.Streams != 0 { 512 return fmt.Errorf("Expected no remaining streams, got %d", info.Streams) 513 } 514 return nil 515 }) 516 } 517 518 func TestJetStreamClusterStreamPurge(t *testing.T) { 519 c := createJetStreamClusterExplicit(t, "R5S", 5) 520 defer c.shutdown() 521 522 s := c.randomServer() 523 524 // Client based API 525 nc, js := jsClientConnect(t, s) 526 defer nc.Close() 527 528 _, err := js.AddStream(&nats.StreamConfig{ 529 Name: "TEST", 530 Subjects: []string{"foo", "bar"}, 531 Replicas: 3, 532 }) 533 if err != nil { 534 t.Fatalf("Unexpected error: %v", err) 535 } 536 msg, toSend := []byte("Hello JS Clustering"), 100 537 for i := 0; i < toSend; i++ { 538 if _, err = js.Publish("foo", msg); err != nil { 539 t.Fatalf("Unexpected publish error: %v", err) 540 } 541 } 542 543 // Now grab info for this stream. 544 si, err := js.StreamInfo("TEST") 545 if err != nil { 546 t.Fatalf("Unexpected error: %v", err) 547 } 548 // Check active state as well, shows that the owner answered. 549 if si.State.Msgs != uint64(toSend) { 550 t.Fatalf("Expected %d msgs, got bad state: %+v", toSend, si.State) 551 } 552 553 // Now purge the stream. 
554 if err := js.PurgeStream("TEST"); err != nil { 555 t.Fatalf("Unexpected purge error: %v", err) 556 } 557 si, err = js.StreamInfo("TEST") 558 if err != nil { 559 t.Fatalf("Unexpected error: %v", err) 560 } 561 562 if si.State.Msgs != 0 || si.State.FirstSeq != uint64(toSend+1) { 563 t.Fatalf("Expected no msgs, got: %+v", si.State) 564 } 565 } 566 567 func TestJetStreamClusterStreamUpdateSubjects(t *testing.T) { 568 c := createJetStreamClusterExplicit(t, "R3S", 3) 569 defer c.shutdown() 570 571 // Client based API 572 s := c.randomServer() 573 nc, js := jsClientConnect(t, s) 574 defer nc.Close() 575 576 cfg := &nats.StreamConfig{ 577 Name: "TEST", 578 Subjects: []string{"foo", "bar"}, 579 Replicas: 3, 580 } 581 582 if _, err := js.AddStream(cfg); err != nil { 583 t.Fatalf("Unexpected error: %v", err) 584 } 585 586 // Make sure we can update subjects. 587 cfg.Subjects = []string{"bar", "baz"} 588 589 si, err := js.UpdateStream(cfg) 590 if err != nil { 591 t.Fatalf("Unexpected error: %v", err) 592 } 593 if si == nil { 594 t.Fatalf("Expected a stream info, got none") 595 } 596 if !reflect.DeepEqual(si.Config.Subjects, cfg.Subjects) { 597 t.Fatalf("Expected subjects to be updated: got %+v", si.Config.Subjects) 598 } 599 // Make sure it registered 600 js2, err := nc.JetStream(nats.MaxWait(250 * time.Millisecond)) 601 if err != nil { 602 t.Fatalf("Unexpected error: %v", err) 603 } 604 605 if _, err = js2.Publish("foo", nil); err == nil { 606 t.Fatalf("Expected this to fail") 607 } 608 if _, err = js2.Publish("baz", nil); err != nil { 609 t.Fatalf("Unexpected publish error: %v", err) 610 } 611 } 612 613 func TestJetStreamClusterBadStreamUpdate(t *testing.T) { 614 c := createJetStreamClusterExplicit(t, "R3S", 3) 615 defer c.shutdown() 616 617 // Client based API 618 s := c.randomServer() 619 nc, js := jsClientConnect(t, s) 620 defer nc.Close() 621 622 cfg := &nats.StreamConfig{ 623 Name: "TEST", 624 Subjects: []string{"foo", "bar"}, 625 Replicas: 3, 626 } 627 628 if _, 
err := js.AddStream(cfg); err != nil { 629 t.Fatalf("Unexpected error: %v", err) 630 } 631 632 msg, toSend := []byte("Keep Me"), 50 633 for i := 0; i < toSend; i++ { 634 if _, err := js.Publish("foo", msg); err != nil { 635 t.Fatalf("Unexpected publish error: %v", err) 636 } 637 } 638 639 // Make sure a bad update will not remove our stream. 640 cfg.Subjects = []string{"foo..bar"} 641 if _, err := js.UpdateStream(cfg); err == nil || err == nats.ErrTimeout { 642 t.Fatalf("Expected error but got none or timeout") 643 } 644 645 // Make sure we did not delete our original stream. 646 si, err := js.StreamInfo("TEST") 647 if err != nil { 648 t.Fatalf("Unexpected error: %v", err) 649 } 650 if !reflect.DeepEqual(si.Config.Subjects, []string{"foo", "bar"}) { 651 t.Fatalf("Expected subjects to be original ones, got %+v", si.Config.Subjects) 652 } 653 } 654 655 func TestJetStreamClusterConsumerRedeliveredInfo(t *testing.T) { 656 c := createJetStreamClusterExplicit(t, "R3S", 3) 657 defer c.shutdown() 658 659 // Client based API 660 nc, js := jsClientConnect(t, c.randomServer()) 661 defer nc.Close() 662 663 cfg := &nats.StreamConfig{Name: "TEST"} 664 if _, err := js.AddStream(cfg); err != nil { 665 t.Fatalf("Unexpected error: %v", err) 666 } 667 668 if _, err := js.Publish("TEST", []byte("CI")); err != nil { 669 t.Fatalf("Unexpected publish error: %v", err) 670 } 671 672 sub, _ := nc.SubscribeSync("R") 673 sub.AutoUnsubscribe(2) 674 675 ci, err := js.AddConsumer("TEST", &nats.ConsumerConfig{ 676 DeliverSubject: "R", 677 AckPolicy: nats.AckExplicitPolicy, 678 AckWait: 100 * time.Millisecond, 679 }) 680 if err != nil { 681 t.Fatalf("Unexpected error: %v", err) 682 } 683 684 checkSubsPending(t, sub, 2) 685 sub.Unsubscribe() 686 687 ci, err = js.ConsumerInfo("TEST", ci.Name) 688 if err != nil { 689 t.Fatalf("Unexpected error: %v", err) 690 } 691 if ci.NumRedelivered != 1 { 692 t.Fatalf("Expected 1 redelivered, got %d", ci.NumRedelivered) 693 } 694 } 695 696 func 
TestJetStreamClusterConsumerState(t *testing.T) { 697 c := createJetStreamClusterExplicit(t, "R3S", 5) 698 defer c.shutdown() 699 700 s := c.randomServer() 701 702 // Client based API 703 nc, js := jsClientConnect(t, s) 704 defer nc.Close() 705 706 _, err := js.AddStream(&nats.StreamConfig{ 707 Name: "TEST", 708 Subjects: []string{"foo", "bar"}, 709 Replicas: 3, 710 }) 711 if err != nil { 712 t.Fatalf("Unexpected error: %v", err) 713 } 714 msg, toSend := []byte("Hello JS Clustering"), 10 715 for i := 0; i < toSend; i++ { 716 if _, err = js.Publish("foo", msg); err != nil { 717 t.Fatalf("Unexpected publish error: %v", err) 718 } 719 } 720 721 // Make sure we are not connected to any of the stream servers so that we do not do client reconnect 722 // when we take out the consumer leader. 723 if s.JetStreamIsStreamAssigned("$G", "TEST") { 724 nc.Close() 725 for _, ns := range c.servers { 726 if !ns.JetStreamIsStreamAssigned("$G", "TEST") { 727 s = ns 728 nc, js = jsClientConnect(t, s) 729 defer nc.Close() 730 break 731 } 732 } 733 } 734 735 sub, err := js.PullSubscribe("foo", "dlc") 736 if err != nil { 737 t.Fatalf("Unexpected error: %v", err) 738 } 739 740 // Pull 5 messages and ack. 741 for _, m := range fetchMsgs(t, sub, 5, 5*time.Second) { 742 m.AckSync() 743 } 744 745 // Let state propagate for exact comparison below. 
746 time.Sleep(200 * time.Millisecond) 747 748 ci, err := sub.ConsumerInfo() 749 if err != nil { 750 t.Fatalf("Unexpected error getting consumer info: %v", err) 751 } 752 if ci.AckFloor.Consumer != 5 { 753 t.Fatalf("Expected ack floor of %d, got %d", 5, ci.AckFloor.Consumer) 754 } 755 756 c.consumerLeader("$G", "TEST", "dlc").Shutdown() 757 c.waitOnConsumerLeader("$G", "TEST", "dlc") 758 759 nci, err := sub.ConsumerInfo() 760 if err != nil { 761 t.Fatalf("Unexpected error getting consumer info: %v", err) 762 } 763 // nil out timestamp for better comparison 764 nci.Delivered.Last, ci.Delivered.Last = nil, nil 765 if nci.Delivered != ci.Delivered { 766 t.Fatalf("Consumer delivered did not match after leader switch, wanted %+v, got %+v", ci.Delivered, nci.Delivered) 767 } 768 nci.AckFloor.Last, ci.AckFloor.Last = nil, nil 769 if nci.AckFloor != ci.AckFloor { 770 t.Fatalf("Consumer ackfloor did not match after leader switch, wanted %+v, got %+v", ci.AckFloor, nci.AckFloor) 771 } 772 773 // Now make sure we can receive new messages. 774 // Pull last 5. 
775 for _, m := range fetchMsgs(t, sub, 5, 5*time.Second) { 776 m.AckSync() 777 } 778 779 nci, _ = sub.ConsumerInfo() 780 if nci.Delivered.Consumer != 10 || nci.Delivered.Stream != 10 { 781 t.Fatalf("Received bad delivered: %+v", nci.Delivered) 782 } 783 if nci.AckFloor.Consumer != 10 || nci.AckFloor.Stream != 10 { 784 t.Fatalf("Received bad ackfloor: %+v", nci.AckFloor) 785 } 786 if nci.NumAckPending != 0 { 787 t.Fatalf("Received bad ackpending: %+v", nci.NumAckPending) 788 } 789 } 790 791 func TestJetStreamClusterFullConsumerState(t *testing.T) { 792 c := createJetStreamClusterExplicit(t, "R3S", 3) 793 defer c.shutdown() 794 795 s := c.randomServer() 796 797 // Client based API 798 nc, js := jsClientConnect(t, s) 799 defer nc.Close() 800 801 _, err := js.AddStream(&nats.StreamConfig{ 802 Name: "TEST", 803 Subjects: []string{"foo", "bar"}, 804 Replicas: 3, 805 }) 806 if err != nil { 807 t.Fatalf("Unexpected error: %v", err) 808 } 809 msg, toSend := []byte("Hello JS Clustering"), 10 810 for i := 0; i < toSend; i++ { 811 if _, err = js.Publish("foo", msg); err != nil { 812 t.Fatalf("Unexpected publish error: %v", err) 813 } 814 } 815 816 sub, err := js.PullSubscribe("foo", "dlc") 817 if err != nil { 818 t.Fatalf("Unexpected error: %v", err) 819 } 820 821 fetchMsgs(t, sub, 1, 5*time.Second) 822 823 // Now purge the stream. 824 if err := js.PurgeStream("TEST"); err != nil { 825 t.Fatalf("Unexpected purge error: %v", err) 826 } 827 } 828 829 func TestJetStreamClusterMetaSnapshotsAndCatchup(t *testing.T) { 830 c := createJetStreamClusterExplicit(t, "R3S", 3) 831 defer c.shutdown() 832 833 // Shut one down. 834 rs := c.randomServer() 835 rs.Shutdown() 836 837 c.waitOnLeader() 838 s := c.leader() 839 840 // Client based API 841 nc, js := jsClientConnect(t, s) 842 defer nc.Close() 843 844 numStreams := 4 845 // Create 4 streams 846 // FIXME(dlc) - R2 make sure we place properly. 
847 for i := 0; i < numStreams; i++ { 848 sn := fmt.Sprintf("T-%d", i+1) 849 _, err := js.AddStream(&nats.StreamConfig{Name: sn}) 850 if err != nil { 851 t.Fatalf("Unexpected error: %v", err) 852 } 853 } 854 855 c.leader().JetStreamSnapshotMeta() 856 857 rs = c.restartServer(rs) 858 c.checkClusterFormed() 859 c.waitOnServerCurrent(rs) 860 861 rs.Shutdown() 862 c.waitOnLeader() 863 864 for i := 0; i < numStreams; i++ { 865 sn := fmt.Sprintf("T-%d", i+1) 866 err := js.DeleteStream(sn) 867 if err != nil { 868 t.Fatalf("Unexpected error: %v", err) 869 } 870 } 871 872 rs = c.restartServer(rs) 873 c.checkClusterFormed() 874 c.waitOnServerCurrent(rs) 875 } 876 877 func TestJetStreamClusterMetaSnapshotsMultiChange(t *testing.T) { 878 c := createJetStreamClusterExplicit(t, "R3S", 2) 879 defer c.shutdown() 880 881 s := c.leader() 882 883 // Client based API 884 nc, js := jsClientConnect(t, s) 885 defer nc.Close() 886 887 // Add in 2 streams with 1 consumer each. 888 if _, err := js.AddStream(&nats.StreamConfig{Name: "S1"}); err != nil { 889 t.Fatalf("Unexpected error: %v", err) 890 } 891 c.waitOnStreamLeader(globalAccountName, "S1") 892 _, err := js.AddConsumer("S1", &nats.ConsumerConfig{Durable: "S1C1", AckPolicy: nats.AckExplicitPolicy}) 893 if err != nil { 894 t.Fatalf("Unexpected error: %v", err) 895 } 896 c.waitOnConsumerLeader(globalAccountName, "S1", "S1C1") 897 898 if _, err = js.AddStream(&nats.StreamConfig{Name: "S2"}); err != nil { 899 t.Fatalf("Unexpected error: %v", err) 900 } 901 c.waitOnStreamLeader(globalAccountName, "S2") 902 _, err = js.AddConsumer("S2", &nats.ConsumerConfig{Durable: "S2C1", AckPolicy: nats.AckExplicitPolicy}) 903 if err != nil { 904 t.Fatalf("Unexpected error: %v", err) 905 } 906 c.waitOnConsumerLeader(globalAccountName, "S2", "S2C1") 907 908 // Add in a new server to the group. This way we know we can delete the original streams and consumers. 
909 rs := c.addInNewServer() 910 c.waitOnServerCurrent(rs) 911 rsn := rs.Name() 912 913 // Shut it down. 914 rs.Shutdown() 915 916 // Wait for the peer to be removed. 917 checkFor(t, 2*time.Second, 100*time.Millisecond, func() error { 918 for _, p := range s.JetStreamClusterPeers() { 919 if p == rsn { 920 return fmt.Errorf("Old server still in peer set") 921 } 922 } 923 return nil 924 }) 925 926 // We want to make changes here that test each delta scenario for the meta snapshots. 927 // Add new stream and consumer. 928 if _, err = js.AddStream(&nats.StreamConfig{Name: "S3"}); err != nil { 929 t.Fatalf("Unexpected error: %v", err) 930 } 931 c.waitOnStreamLeader(globalAccountName, "S3") 932 _, err = js.AddConsumer("S3", &nats.ConsumerConfig{Durable: "S3C1", AckPolicy: nats.AckExplicitPolicy}) 933 if err != nil { 934 t.Fatalf("Unexpected error: %v", err) 935 } 936 c.waitOnConsumerLeader(globalAccountName, "S3", "S3C1") 937 // Delete stream S2 938 resp, _ := nc.Request(fmt.Sprintf(JSApiStreamDeleteT, "S2"), nil, time.Second) 939 var dResp JSApiStreamDeleteResponse 940 if err := json.Unmarshal(resp.Data, &dResp); err != nil { 941 t.Fatalf("Unexpected error: %v", err) 942 } 943 if !dResp.Success || dResp.Error != nil { 944 t.Fatalf("Got a bad response %+v", dResp.Error) 945 } 946 // Delete the consumer on S1 but add another. 
	resp, _ = nc.Request(fmt.Sprintf(JSApiConsumerDeleteT, "S1", "S1C1"), nil, time.Second)
	var cdResp JSApiConsumerDeleteResponse
	if err = json.Unmarshal(resp.Data, &cdResp); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if !cdResp.Success || cdResp.Error != nil {
		t.Fatalf("Got a bad response %+v", cdResp)
	}
	// Add new consumer on S1
	_, err = js.AddConsumer("S1", &nats.ConsumerConfig{Durable: "S1C2", AckPolicy: nats.AckExplicitPolicy})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	c.waitOnConsumerLeader(globalAccountName, "S1", "S1C2")

	// Snapshot the metadata layer on the leader so the restarted server
	// has to catch up from a snapshot rather than a full log replay.
	cl := c.leader()
	cl.JetStreamSnapshotMeta()
	c.waitOnServerCurrent(cl)

	rs = c.restartServer(rs)
	c.checkClusterFormed()
	c.waitOnServerCurrent(rs)
}

// TestJetStreamClusterStreamSynchedTimeStamps verifies that a message's stored
// timestamp is replicated, i.e. reading the message back after the stream
// leader is shut down and a new leader elected yields the exact same timestamp
// the original leader reported.
func TestJetStreamClusterStreamSynchedTimeStamps(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{Name: "foo", Storage: nats.MemoryStorage, Replicas: 3})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	if _, err = js.Publish("foo", []byte("TSS")); err != nil {
		t.Fatalf("Unexpected publish error: %v", err)
	}

	// Grab the message and timestamp from our current leader
	sub, err := js.SubscribeSync("foo")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	m, err := sub.NextMsg(time.Second)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	meta, _ := m.Metadata()

	sub.Unsubscribe()

	sl := c.streamLeader("$G", "foo")

	sl.Shutdown()

	c.waitOnLeader()
	c.waitOnStreamLeader("$G", "foo")

	// Reconnect to the new meta leader and read the message back from
	// whichever server now leads the stream.
	nc, js = jsClientConnect(t, c.leader())
	defer nc.Close()

	sm, err := js.GetMsg("foo", 1)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if !sm.Time.Equal(meta.Timestamp) {
		t.Fatalf("Expected same timestamps, got %v vs %v", sm.Time, meta.Timestamp)
	}
}

// Test to mimic what R.I. was seeing.
// Full-cluster stop/restart with a single durable consumer: the stream and its
// consumer must both be recovered, and listing APIs must report exactly one of each.
func TestJetStreamClusterRestoreSingleConsumer(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{Name: "foo"})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	if _, err = js.Publish("foo", []byte("TSS")); err != nil {
		t.Fatalf("Unexpected publish error: %v", err)
	}

	sub, err := js.SubscribeSync("foo", nats.Durable("dlc"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if m, err := sub.NextMsg(time.Second); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	} else {
		m.AckSync()
	}

	// Restart the entire cluster and wait for recovery.
	c.stopAll()
	c.restartAll()
	c.waitOnLeader()
	c.waitOnStreamLeader("$G", "foo")

	s = c.randomServer()
	nc, js = jsClientConnect(t, s)
	defer nc.Close()

	var names []string
	for name := range js.StreamNames() {
		names = append(names, name)
	}
	if len(names) != 1 {
		t.Fatalf("Expected only 1 stream but got %d", len(names))
	}

	// Now do detailed version.
	var infos []*nats.StreamInfo
	for info := range js.StreamsInfo() {
		infos = append(infos, info)
	}
	if len(infos) != 1 {
		t.Fatalf("Expected 1 stream but got %d", len(infos))
	}
	si, err := js.StreamInfo("foo")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if si == nil || si.Config.Name != "foo" {
		t.Fatalf("StreamInfo is not correct %+v", si)
	}

	// Now check for consumer.
	names = names[:0]
	for name := range js.ConsumerNames("foo") {
		names = append(names, name)
	}
	if len(names) != 1 {
		t.Fatalf("Expected 1 consumer but got %d", len(names))
	}
}

// TestJetStreamClusterMaxBytesForStream checks that stream MaxBytes placement
// is enforced against the per-server file store limit.
func TestJetStreamClusterMaxBytesForStream(t *testing.T) {
	// Has max_file_store of 2GB
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	nc, js := jsClientConnect(t, c.randomServer())
	defer nc.Close()

	info, err := js.AccountInfo()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Make sure we still are dynamic.
	if info.Limits.MaxStore != -1 || info.Limits.MaxMemory != -1 {
		t.Fatalf("Expected dynamic limits for the account, got %+v\n", info.Limits)
	}
	// Stream config.
	cfg := &nats.StreamConfig{
		Name:     "TEST",
		Replicas: 2,
		MaxBytes: 2 * 1024 * 1024 * 1024, // 2GB
	}
	_, err = js.AddStream(cfg)
	require_NoError(t, err)

	// Make sure going over the single server limit though is enforced (for now).
	cfg.Name = "TEST2"
	cfg.MaxBytes *= 2
	_, err = js.AddStream(cfg)
	require_Contains(t, err.Error(), "no suitable peers for placement")
}

// TestJetStreamClusterStreamPublishWithActiveConsumers verifies that a push
// consumer receives each message exactly once and keeps consistent state
// across a consumer leader failover.
func TestJetStreamClusterStreamPublishWithActiveConsumers(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	s := c.randomServer()

	// Client based API
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{Name: "foo", Replicas: 3})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	if _, err = js.Publish("foo", []byte("TSS")); err != nil {
		t.Fatalf("Unexpected publish error: %v", err)
	}

	sub, err := js.SubscribeSync("foo", nats.Durable("dlc"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// FIXME(dlc) - Need to track this down.
	c.waitOnConsumerLeader("$G", "foo", "dlc")

	if m, err := sub.NextMsg(time.Second); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	} else {
		m.AckSync()
	}

	// Send 10 messages.
	for i := 1; i <= 10; i++ {
		payload := []byte(fmt.Sprintf("MSG-%d", i))
		if _, err = js.Publish("foo", payload); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}
	checkSubsPending(t, sub, 10)
	// Sanity check for duplicate deliveries..
	if nmsgs, _, _ := sub.Pending(); nmsgs > 10 {
		t.Fatalf("Expected only %d responses, got %d more", 10, nmsgs)
	}
	for i := 1; i <= 10; i++ {
		m, err := sub.NextMsg(time.Second)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		payload := []byte(fmt.Sprintf("MSG-%d", i))
		if !bytes.Equal(m.Data, payload) {
			t.Fatalf("Did not get expected msg, expected %q, got %q", payload, m.Data)
		}
	}

	ci, err := sub.ConsumerInfo()
	if err != nil {
		t.Fatalf("Unexpected error getting consumer info: %v", err)
	}

	// Force a consumer leader change and compare consumer state before/after.
	c.consumerLeader("$G", "foo", "dlc").Shutdown()
	c.waitOnConsumerLeader("$G", "foo", "dlc")

	ci2, err := sub.ConsumerInfo()
	if err != nil {
		t.Fatalf("Unexpected error getting consumer info: %v", err)
	}

	// Cluster info will differ after the leader change; exclude it from the compare.
	ci.Cluster = nil
	ci2.Cluster = nil

	// nil out timestamp for better comparison
	ci.Delivered.Last, ci2.Delivered.Last = nil, nil
	ci.AckFloor.Last, ci2.AckFloor.Last = nil, nil
	if !reflect.DeepEqual(ci, ci2) {
		t.Fatalf("Consumer info did not match: %+v vs %+v", ci, ci2)
	}

	// In case the server above was also stream leader.
	c.waitOnStreamLeader("$G", "foo")

	// Now send more..
	// Send 10 more messages.
	for i := 11; i <= 20; i++ {
		payload := []byte(fmt.Sprintf("MSG-%d", i))
		if _, err = js.Publish("foo", payload); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}

	checkSubsPending(t, sub, 10)
	// Sanity check for duplicate deliveries..
	if nmsgs, _, _ := sub.Pending(); nmsgs > 10 {
		t.Fatalf("Expected only %d responses, got %d more", 10, nmsgs)
	}

	for i := 11; i <= 20; i++ {
		m, err := sub.NextMsg(time.Second)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		payload := []byte(fmt.Sprintf("MSG-%d", i))
		if !bytes.Equal(m.Data, payload) {
			t.Fatalf("Did not get expected msg, expected %q, got %q", payload, m.Data)
		}
	}
}

// TestJetStreamClusterStreamOverlapSubjects verifies that creating a second
// stream with an overlapping subject fails and leaves no partial stream behind.
func TestJetStreamClusterStreamOverlapSubjects(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"foo"}}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST2", Subjects: []string{"foo"}}); err == nil || err == nats.ErrTimeout {
		t.Fatalf("Expected error but got none or timeout: %v", err)
	}

	// Now grab list of streams and make sure the second is not there.
	var names []string
	for name := range js.StreamNames() {
		names = append(names, name)
	}
	if len(names) != 1 {
		t.Fatalf("Expected only 1 stream but got %d", len(names))
	}

	// Now do a detailed version.
	var infos []*nats.StreamInfo
	for info := range js.StreamsInfo() {
		infos = append(infos, info)
	}
	if len(infos) != 1 {
		t.Fatalf("Expected only 1 stream but got %d", len(infos))
	}
}

// TestJetStreamClusterStreamInfoList verifies that the stream list API returns
// all streams with accurate per-stream message counts.
func TestJetStreamClusterStreamInfoList(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	createStream := func(name string) {
		t.Helper()
		if _, err := js.AddStream(&nats.StreamConfig{Name: name}); err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
	}

	createStream("foo")
	createStream("bar")
	createStream("baz")

	sendBatch := func(subject string, n int) {
		t.Helper()
		// Send a batch to a given subject.
		for i := 0; i < n; i++ {
			if _, err := js.Publish(subject, []byte("OK")); err != nil {
				t.Fatalf("Unexpected publish error: %v", err)
			}
		}
	}

	sendBatch("foo", 10)
	sendBatch("bar", 22)
	sendBatch("baz", 33)

	// Now get the stream list info.
	var infos []*nats.StreamInfo
	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
		infos = infos[:0]
		for info := range js.StreamsInfo() {
			infos = append(infos, info)
		}
		if len(infos) != 3 {
			return fmt.Errorf("StreamInfo expected 3 results, got %d", len(infos))
		}
		return nil
	})

	// Counts must match what was published to each stream above.
	for _, si := range infos {
		switch si.Config.Name {
		case "foo":
			if si.State.Msgs != 10 {
				t.Fatalf("Expected %d msgs but got %d", 10, si.State.Msgs)
			}
		case "bar":
			if si.State.Msgs != 22 {
				t.Fatalf("Expected %d msgs but got %d", 22, si.State.Msgs)
			}
		case "baz":
			if si.State.Msgs != 33 {
				t.Fatalf("Expected %d msgs but got %d", 33, si.State.Msgs)
			}
		}
	}
}

// TestJetStreamClusterConsumerInfoList verifies that the consumer list API
// reports accurate delivered/ack-floor state for multiple pull consumers.
func TestJetStreamClusterConsumerInfoList(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 3}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Place messages so we can generate consumer state.
	for i := 0; i < 10; i++ {
		if _, err := js.Publish("TEST", []byte("OK")); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}

	createConsumer := func(name string) *nats.Subscription {
		t.Helper()
		sub, err := js.PullSubscribe("TEST", name)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		return sub
	}

	subFoo := createConsumer("foo")
	subBar := createConsumer("bar")
	subBaz := createConsumer("baz")

	// Place consumers in various states.
1370 for _, ss := range []struct { 1371 sub *nats.Subscription 1372 fetch int 1373 ack int 1374 }{ 1375 {subFoo, 4, 2}, 1376 {subBar, 2, 0}, 1377 {subBaz, 8, 6}, 1378 } { 1379 msgs := fetchMsgs(t, ss.sub, ss.fetch, 5*time.Second) 1380 for i := 0; i < ss.ack; i++ { 1381 msgs[i].AckSync() 1382 } 1383 } 1384 1385 // Now get the consumer list info. 1386 var infos []*nats.ConsumerInfo 1387 for info := range js.ConsumersInfo("TEST") { 1388 infos = append(infos, info) 1389 } 1390 if len(infos) != 3 { 1391 t.Fatalf("ConsumerInfo expected 3 results, got %d", len(infos)) 1392 } 1393 for _, ci := range infos { 1394 switch ci.Name { 1395 case "foo": 1396 if ci.Delivered.Consumer != 4 { 1397 t.Fatalf("Expected %d delivered but got %d", 4, ci.Delivered.Consumer) 1398 } 1399 if ci.AckFloor.Consumer != 2 { 1400 t.Fatalf("Expected %d for ack floor but got %d", 2, ci.AckFloor.Consumer) 1401 } 1402 case "bar": 1403 if ci.Delivered.Consumer != 2 { 1404 t.Fatalf("Expected %d delivered but got %d", 2, ci.Delivered.Consumer) 1405 } 1406 if ci.AckFloor.Consumer != 0 { 1407 t.Fatalf("Expected %d for ack floor but got %d", 0, ci.AckFloor.Consumer) 1408 } 1409 case "baz": 1410 if ci.Delivered.Consumer != 8 { 1411 t.Fatalf("Expected %d delivered but got %d", 8, ci.Delivered.Consumer) 1412 } 1413 if ci.AckFloor.Consumer != 6 { 1414 t.Fatalf("Expected %d for ack floor but got %d", 6, ci.AckFloor.Consumer) 1415 } 1416 } 1417 } 1418 } 1419 1420 func TestJetStreamClusterStreamUpdate(t *testing.T) { 1421 c := createJetStreamClusterExplicit(t, "R3S", 3) 1422 defer c.shutdown() 1423 1424 // Client based API 1425 s := c.randomServer() 1426 nc, js := jsClientConnect(t, s) 1427 defer nc.Close() 1428 1429 sc := &nats.StreamConfig{ 1430 Name: "TEST", 1431 Subjects: []string{"foo"}, 1432 Replicas: 3, 1433 MaxMsgs: 10, 1434 Discard: DiscardNew, 1435 } 1436 1437 if _, err := js.AddStream(sc); err != nil { 1438 t.Fatalf("Unexpected error: %v", err) 1439 } 1440 1441 for i := 1; i <= int(sc.MaxMsgs); i++ { 1442 
msg := []byte(fmt.Sprintf("HELLO JSC-%d", i)) 1443 if _, err := js.Publish("foo", msg); err != nil { 1444 t.Fatalf("Unexpected publish error: %v", err) 1445 } 1446 } 1447 1448 // Expect error here. 1449 if _, err := js.Publish("foo", []byte("fail")); err == nil { 1450 t.Fatalf("Expected publish to fail") 1451 } 1452 1453 // Now update MaxMsgs, select non-leader 1454 s = c.randomNonStreamLeader("$G", "TEST") 1455 nc, js = jsClientConnect(t, s) 1456 defer nc.Close() 1457 1458 sc.MaxMsgs = 20 1459 si, err := js.UpdateStream(sc) 1460 if err != nil { 1461 t.Fatalf("Unexpected error: %v", err) 1462 } 1463 if si.Config.MaxMsgs != 20 { 1464 t.Fatalf("Expected to have config updated with max msgs of %d, got %d", 20, si.Config.MaxMsgs) 1465 } 1466 1467 // Do one that will fail. Wait and make sure we only are getting one response. 1468 sc.Name = "TEST22" 1469 1470 rsub, _ := nc.SubscribeSync(nats.NewInbox()) 1471 defer rsub.Unsubscribe() 1472 nc.Flush() 1473 1474 req, _ := json.Marshal(sc) 1475 if err := nc.PublishRequest(fmt.Sprintf(JSApiStreamUpdateT, "TEST"), rsub.Subject, req); err != nil { 1476 t.Fatalf("Unexpected error: %v", err) 1477 } 1478 1479 // Wait incase more than one reply sent. 
1480 time.Sleep(250 * time.Millisecond) 1481 1482 if nmsgs, _, _ := rsub.Pending(); err != nil || nmsgs != 1 { 1483 t.Fatalf("Expected only one response, got %d", nmsgs) 1484 } 1485 1486 m, err := rsub.NextMsg(time.Second) 1487 if err != nil { 1488 t.Fatalf("Error getting message: %v", err) 1489 } 1490 1491 var scResp JSApiStreamCreateResponse 1492 if err := json.Unmarshal(m.Data, &scResp); err != nil { 1493 t.Fatalf("Unexpected error: %v", err) 1494 } 1495 if scResp.StreamInfo != nil || scResp.Error == nil { 1496 t.Fatalf("Did not receive correct response: %+v", scResp) 1497 } 1498 } 1499 1500 func TestJetStreamClusterStreamExtendedUpdates(t *testing.T) { 1501 c := createJetStreamClusterExplicit(t, "R3S", 3) 1502 defer c.shutdown() 1503 1504 // Client based API 1505 s := c.randomServer() 1506 nc, js := jsClientConnect(t, s) 1507 defer nc.Close() 1508 1509 cfg := &nats.StreamConfig{ 1510 Name: "TEST", 1511 Subjects: []string{"foo"}, 1512 Replicas: 3, 1513 } 1514 if _, err := js.AddStream(cfg); err != nil { 1515 t.Fatalf("Unexpected error: %v", err) 1516 } 1517 1518 updateStream := func() *nats.StreamInfo { 1519 si, err := js.UpdateStream(cfg) 1520 if err != nil { 1521 t.Fatalf("Unexpected error: %v", err) 1522 } 1523 return si 1524 } 1525 1526 // Subjects can be updated 1527 cfg.Subjects = []string{"bar", "baz"} 1528 if si := updateStream(); !reflect.DeepEqual(si.Config.Subjects, cfg.Subjects) { 1529 t.Fatalf("Did not get expected stream info: %+v", si) 1530 } 1531 // Mirror changes are not supported for now 1532 cfg.Subjects = nil 1533 cfg.Mirror = &nats.StreamSource{Name: "ORDERS"} 1534 _, err := js.UpdateStream(cfg) 1535 require_Error(t, err, NewJSStreamMirrorNotUpdatableError()) 1536 } 1537 1538 func TestJetStreamClusterDoubleAdd(t *testing.T) { 1539 c := createJetStreamClusterExplicit(t, "R32", 2) 1540 defer c.shutdown() 1541 1542 s := c.randomServer() 1543 1544 // Client based API 1545 nc, js := jsClientConnect(t, s) 1546 defer nc.Close() 1547 1548 if _, err 
:= js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 2}); err != nil { 1549 t.Fatalf("Unexpected error: %v", err) 1550 } 1551 // Streams should allow double add. 1552 if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 2}); err != nil { 1553 t.Fatalf("Unexpected error: %v", err) 1554 } 1555 1556 // Check Consumers. 1557 cfg := &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy} 1558 if _, err := js.AddConsumer("TEST", cfg); err != nil { 1559 t.Fatalf("Unexpected error: %v", err) 1560 } 1561 // Check double add ok. 1562 if _, err := js.AddConsumer("TEST", cfg); err != nil { 1563 t.Fatalf("Expected no error but got: %v", err) 1564 } 1565 } 1566 1567 func TestJetStreamClusterDefaultMaxAckPending(t *testing.T) { 1568 c := createJetStreamClusterExplicit(t, "R32", 2) 1569 defer c.shutdown() 1570 1571 s := c.randomServer() 1572 1573 // Client based API 1574 nc, js := jsClientConnect(t, s) 1575 defer nc.Close() 1576 1577 if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 2}); err != nil { 1578 t.Fatalf("Unexpected error: %v", err) 1579 } 1580 1581 // Do Consumers too. 1582 cfg := &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy} 1583 ci, err := js.AddConsumer("TEST", cfg) 1584 if err != nil { 1585 t.Fatalf("Unexpected error: %v", err) 1586 } 1587 1588 // Check that we have a default set now for the max ack pending. 
1589 if ci.Config.MaxAckPending != JsDefaultMaxAckPending { 1590 t.Fatalf("Expected a default for max ack pending of %d, got %d", JsDefaultMaxAckPending, ci.Config.MaxAckPending) 1591 } 1592 } 1593 1594 func TestJetStreamClusterStreamNormalCatchup(t *testing.T) { 1595 c := createJetStreamClusterExplicit(t, "R3S", 3) 1596 defer c.shutdown() 1597 1598 // Client based API 1599 s := c.randomServer() 1600 nc, js := jsClientConnect(t, s) 1601 defer nc.Close() 1602 1603 _, err := js.AddStream(&nats.StreamConfig{ 1604 Name: "TEST", 1605 Subjects: []string{"foo", "bar"}, 1606 Replicas: 3, 1607 }) 1608 if err != nil { 1609 t.Fatalf("Unexpected error: %v", err) 1610 } 1611 1612 toSend := 10 1613 for i := 1; i <= toSend; i++ { 1614 msg := []byte(fmt.Sprintf("HELLO JSC-%d", i)) 1615 if _, err = js.Publish("foo", msg); err != nil { 1616 t.Fatalf("Unexpected publish error: %v", err) 1617 } 1618 } 1619 1620 sl := c.streamLeader("$G", "TEST") 1621 sl.Shutdown() 1622 c.waitOnStreamLeader("$G", "TEST") 1623 1624 // Send 10 more while one replica offline. 1625 for i := toSend; i <= toSend*2; i++ { 1626 msg := []byte(fmt.Sprintf("HELLO JSC-%d", i)) 1627 if _, err = js.Publish("foo", msg); err != nil { 1628 t.Fatalf("Unexpected publish error: %v", err) 1629 } 1630 } 1631 1632 // Delete the first from the second batch. 
1633 dreq := JSApiMsgDeleteRequest{Seq: uint64(toSend)} 1634 dreqj, err := json.Marshal(dreq) 1635 if err != nil { 1636 t.Fatalf("Unexpected error: %v", err) 1637 } 1638 resp, _ := nc.Request(fmt.Sprintf(JSApiMsgDeleteT, "TEST"), dreqj, time.Second) 1639 var delMsgResp JSApiMsgDeleteResponse 1640 if err = json.Unmarshal(resp.Data, &delMsgResp); err != nil { 1641 t.Fatalf("Unexpected error: %v", err) 1642 } 1643 if !delMsgResp.Success || delMsgResp.Error != nil { 1644 t.Fatalf("Got a bad response %+v", delMsgResp.Error) 1645 } 1646 1647 sl = c.restartServer(sl) 1648 c.checkClusterFormed() 1649 1650 c.waitOnServerCurrent(sl) 1651 c.waitOnStreamCurrent(sl, "$G", "TEST") 1652 } 1653 1654 func TestJetStreamClusterStreamSnapshotCatchup(t *testing.T) { 1655 c := createJetStreamClusterExplicit(t, "R3S", 3) 1656 defer c.shutdown() 1657 1658 s := c.randomServer() 1659 1660 // Client based API 1661 nc, js := jsClientConnect(t, s) 1662 defer nc.Close() 1663 1664 _, err := js.AddStream(&nats.StreamConfig{ 1665 Name: "TEST", 1666 Subjects: []string{"foo"}, 1667 Replicas: 3, 1668 }) 1669 if err != nil { 1670 t.Fatalf("Unexpected error: %v", err) 1671 } 1672 1673 pseq := uint64(1) 1674 sendBatch := func(n int) { 1675 t.Helper() 1676 // Send a batch. 1677 for i := 0; i < n; i++ { 1678 msg := []byte(fmt.Sprintf("HELLO JSC-%d", pseq)) 1679 if _, err = js.Publish("foo", msg); err != nil { 1680 t.Fatalf("Unexpected publish error: %v", err) 1681 } 1682 pseq++ 1683 } 1684 } 1685 1686 sendBatch(2) 1687 1688 sl := c.streamLeader("$G", "TEST") 1689 sl.Shutdown() 1690 c.waitOnStreamLeader("$G", "TEST") 1691 1692 sendBatch(100) 1693 1694 deleteMsg := func(seq uint64) { 1695 if err := js.DeleteMsg("TEST", seq); err != nil { 1696 t.Fatalf("Unexpected error: %v", err) 1697 } 1698 } 1699 1700 // Delete the first from the second batch. 1701 deleteMsg(pseq / 2) 1702 // Delete the next one too. 
	deleteMsg(pseq/2 + 1)

	nsl := c.streamLeader("$G", "TEST")
	nsl.JetStreamSnapshotStream("$G", "TEST")

	// Do some activity post snapshot as well.
	// Delete next to last.
	deleteMsg(pseq - 2)
	// Send another batch.
	sendBatch(100)

	// Capture the leader's detailed state to compare against after restart.
	mset, err := nsl.GlobalAccount().lookupStream("TEST")
	require_NoError(t, err)
	ostate := mset.stateWithDetail(true)

	sl = c.restartServer(sl)
	c.checkClusterFormed()

	c.waitOnServerCurrent(sl)
	c.waitOnStreamCurrent(sl, "$G", "TEST")

	mset, err = sl.GlobalAccount().lookupStream("TEST")
	require_NoError(t, err)

	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
		if nstate := mset.stateWithDetail(true); !reflect.DeepEqual(ostate, nstate) {
			return fmt.Errorf("States do not match after recovery: %+v vs %+v", ostate, nstate)
		}
		return nil
	})
}

// TestJetStreamClusterDeleteMsg verifies message delete and stream purge work
// on an R=1 stream in a clustered setup.
func TestJetStreamClusterDeleteMsg(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	// R=1 make sure delete works.
	_, err := js.AddStream(&nats.StreamConfig{Name: "TEST"})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	toSend := 10
	for i := 1; i <= toSend; i++ {
		msg := []byte(fmt.Sprintf("HELLO JSC-%d", i))
		if _, err = js.Publish("TEST", msg); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}

	deleteMsg := func(seq uint64) {
		if err := js.DeleteMsg("TEST", seq); err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
	}

	deleteMsg(1)

	// Also make sure purge of R=1 works too.
	if err := js.PurgeStream("TEST"); err != nil {
		t.Fatalf("Unexpected purge error: %v", err)
	}
}

// TestJetStreamClusterDeleteMsgAndRestart verifies a replicated stream with a
// deleted message recovers cleanly after a full cluster restart.
func TestJetStreamClusterDeleteMsgAndRestart(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	// R=2 make sure delete works.
	_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 2})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	toSend := 10
	for i := 1; i <= toSend; i++ {
		msg := []byte(fmt.Sprintf("HELLO JSC-%d", i))
		if _, err = js.Publish("TEST", msg); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}

	deleteMsg := func(seq uint64) {
		if err := js.DeleteMsg("TEST", seq); err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
	}

	deleteMsg(1)

	c.stopAll()
	c.restartAll()

	c.waitOnStreamLeader("$G", "TEST")
}

// TestJetStreamClusterStreamSnapshotCatchupWithPurge verifies a replica can
// catch up from a snapshot even when the stream is purged mid-recovery.
func TestJetStreamClusterStreamSnapshotCatchupWithPurge(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R5S", 5)
	defer c.shutdown()

	s := c.randomServer()

	// Client based API
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"foo"},
		Replicas: 3,
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	sl := c.streamLeader("$G", "TEST")

	sl.Shutdown()
	c.waitOnStreamLeader("$G", "TEST")

	toSend := 10
	for i := 0; i < toSend; i++ {
		if _, err = js.Publish("foo", []byte("OK")); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}

	nsl := c.streamLeader("$G", "TEST")
	if err := nsl.JetStreamSnapshotStream("$G", "TEST"); err != nil {
		t.Fatalf("Error snapshotting stream: %v", err)
	}
	time.Sleep(250 * time.Millisecond)

	sl = c.restartServer(sl)
	c.checkClusterFormed()

	// Now purge the stream while we are recovering.
	if err := js.PurgeStream("TEST"); err != nil {
		t.Fatalf("Unexpected purge error: %v", err)
	}

	c.waitOnServerCurrent(sl)
	c.waitOnStreamCurrent(sl, "$G", "TEST")

	nsl.Shutdown()
	c.waitOnStreamLeader("$G", "TEST")

	if _, err := js.StreamInfo("TEST"); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
}

// TestJetStreamClusterExtendedStreamInfo verifies the cluster section of
// stream/consumer info: leader and replica reporting, replica ordering,
// currency tracking across leader shutdown and restart.
func TestJetStreamClusterExtendedStreamInfo(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"foo"},
		Replicas: 3,
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	toSend := 50
	for i := 0; i < toSend; i++ {
		if _, err = js.Publish("foo", []byte("OK")); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}

	leader := c.streamLeader("$G", "TEST").Name()

	si, err := js.StreamInfo("TEST")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if si.Cluster == nil {
		t.Fatalf("Expected cluster info")
	}
	if si.Cluster.Name != c.name {
		t.Fatalf("Expected cluster name of %q, got %q", c.name, si.Cluster.Name)
	}

	if si.Cluster.Leader != leader {
		t.Fatalf("Expected leader of %q, got %q", leader, si.Cluster.Leader)
	}
	if len(si.Cluster.Replicas) != 2 {
		t.Fatalf("Expected %d replicas, got %d", 2, len(si.Cluster.Replicas))
	}

	// Make sure that returned array is ordered
	for i := 0; i < 50; i++ {
		si, err := js.StreamInfo("TEST")
		require_NoError(t, err)
		require_True(t, len(si.Cluster.Replicas) == 2)
		s1 := si.Cluster.Replicas[0].Name
		s2 := si.Cluster.Replicas[1].Name
		if s1 > s2 {
			t.Fatalf("Expected replicas to be ordered, got %s then %s", s1, s2)
		}
	}

	// Faster timeout since we loop below checking for condition.
	js2, err := nc.JetStream(nats.MaxWait(250 * time.Millisecond))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// We may need to wait a bit for peers to catch up.
	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
		for _, peer := range si.Cluster.Replicas {
			if !peer.Current {
				// Refresh si before retrying so progress is observed.
				if si, err = js2.StreamInfo("TEST"); err != nil {
					t.Fatalf("Could not retrieve stream info")
				}
				return fmt.Errorf("Expected replica to be current: %+v", peer)
			}
		}
		return nil
	})

	// Shutdown the leader.
	oldLeader := c.streamLeader("$G", "TEST")
	oldLeader.Shutdown()

	c.waitOnStreamLeader("$G", "TEST")

	// Re-request.
	leader = c.streamLeader("$G", "TEST").Name()
	si, err = js.StreamInfo("TEST")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if si.Cluster == nil {
		t.Fatalf("Expected cluster info")
	}
	if si.Cluster.Leader != leader {
		t.Fatalf("Expected leader of %q, got %q", leader, si.Cluster.Leader)
	}
	if len(si.Cluster.Replicas) != 2 {
		t.Fatalf("Expected %d replicas, got %d", 2, len(si.Cluster.Replicas))
	}
	// The downed old leader should be the only non-current replica.
	for _, peer := range si.Cluster.Replicas {
		if peer.Name == oldLeader.Name() {
			if peer.Current {
				t.Fatalf("Expected old leader to be reported as not current: %+v", peer)
			}
		} else if !peer.Current {
			t.Fatalf("Expected replica to be current: %+v", peer)
		}
	}

	// Now send a few more messages then restart the oldLeader.
	for i := 0; i < 10; i++ {
		if _, err = js.Publish("foo", []byte("OK")); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}

	oldLeader = c.restartServer(oldLeader)
	c.checkClusterFormed()

	c.waitOnStreamLeader("$G", "TEST")
	c.waitOnStreamCurrent(oldLeader, "$G", "TEST")

	// Re-request.
	leader = c.streamLeader("$G", "TEST").Name()
	si, err = js.StreamInfo("TEST")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if si.Cluster == nil {
		t.Fatalf("Expected cluster info")
	}
	if si.Cluster.Leader != leader {
		t.Fatalf("Expected leader of %q, got %q", leader, si.Cluster.Leader)
	}
	if len(si.Cluster.Replicas) != 2 {
		t.Fatalf("Expected %d replicas, got %d", 2, len(si.Cluster.Replicas))
	}

	// We may need to wait a bit for peers to catch up.
	checkFor(t, 10*time.Second, 100*time.Millisecond, func() error {
		for _, peer := range si.Cluster.Replicas {
			if !peer.Current {
				if si, err = js2.StreamInfo("TEST"); err != nil {
					t.Fatalf("Could not retrieve stream info")
				}
				return fmt.Errorf("Expected replica to be current: %+v", peer)
			}
		}
		return nil
	})

	nc, js = jsClientConnect(t, c.randomServer())
	defer nc.Close()

	// Now do consumer.
	sub, err := js.PullSubscribe("foo", "dlc")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer sub.Unsubscribe()
	fetchMsgs(t, sub, 10, 5*time.Second)

	leader = c.consumerLeader("$G", "TEST", "dlc").Name()
	ci, err := sub.ConsumerInfo()
	if err != nil {
		t.Fatalf("Unexpected error getting consumer info: %v", err)
	}

	if ci.Cluster.Leader != leader {
		t.Fatalf("Expected leader of %q, got %q", leader, ci.Cluster.Leader)
	}
	if len(ci.Cluster.Replicas) != 2 {
		t.Fatalf("Expected %d replicas, got %d", 2, len(ci.Cluster.Replicas))
	}
	checkFor(t, 10*time.Second, 100*time.Millisecond, func() error {
		for _, peer := range si.Cluster.Replicas {
			if !peer.Current {
				return fmt.Errorf("Expected replica to be current: %+v", peer)
			}
		}
		return nil
	})
}

// TestJetStreamClusterExtendedStreamInfoSingleReplica verifies cluster info
// reporting for an R=1 stream (leader, no replicas) and consumer listing.
func TestJetStreamClusterExtendedStreamInfoSingleReplica(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"foo"},
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	toSend := 50
	for i := 0; i < toSend; i++ {
		if _, err = js.Publish("foo", []byte("OK")); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}

	leader := c.streamLeader("$G", "TEST").Name()

	si, err := js.StreamInfo("TEST")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if si.Cluster == nil {
		t.Fatalf("Expected cluster info")
	}
	if si.Cluster.Name != c.name {
		t.Fatalf("Expected cluster name of %q, got %q", c.name, si.Cluster.Name)
	}
	if si.Cluster.Leader != leader {
		si.Cluster.Leader)
	}
	if len(si.Cluster.Replicas) != 0 {
		t.Fatalf("Expected no replicas but got %d", len(si.Cluster.Replicas))
	}

	// Make sure we can grab consumer lists from any
	var infos []*nats.ConsumerInfo
	for info := range js.ConsumersInfo("TEST") {
		infos = append(infos, info)
	}
	if len(infos) != 0 {
		t.Fatalf("ConsumerInfo expected no paged results, got %d", len(infos))
	}

	// Now add in a consumer.
	cfg := &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy}
	if _, err := js.AddConsumer("TEST", cfg); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	infos = infos[:0]
	for info := range js.ConsumersInfo("TEST") {
		infos = append(infos, info)
	}
	if len(infos) != 1 {
		t.Fatalf("ConsumerInfo expected 1 result, got %d", len(infos))
	}

	// Now do direct names list as well.
	var names []string
	for name := range js.ConsumerNames("TEST") {
		names = append(names, name)
	}
	if len(names) != 1 {
		t.Fatalf("Expected only 1 consumer but got %d", len(names))
	}
}

// TestJetStreamClusterInterestRetention verifies that an R3 interest-policy
// stream removes messages once they have been acked by its consumer, and that
// deleting the consumer purges remaining messages.
func TestJetStreamClusterInterestRetention(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "JSC", 3)[sic]
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{Name: "foo", Retention: nats.InterestPolicy, Replicas: 3})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	sub, err := js.SubscribeSync("foo", nats.Durable("dlc"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// If the stream and consumer leaders landed on the same server, move the
	// stream leader so ack handling is exercised across servers.
	sl := c.streamLeader("$G", "foo")
	cl := c.consumerLeader("$G", "foo", "dlc")
	if sl == cl {
		_, err := nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "foo"), nil, time.Second)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		c.waitOnStreamLeader("$G", "foo")
	}

	if _, err = js.Publish("foo", []byte("OK")); err != nil {
		t.Fatalf("Unexpected publish error: %v", err)
	}

	m, err := sub.NextMsg(time.Second)
	if err != nil {
		t.Fatalf("Unexpected error getting msg: %v", err)
	}
	m.AckSync()

	// Removal on ack is asynchronous, so poll for the stream to drain.
	waitForZero := func() {
		checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
			si, err := js.StreamInfo("foo")
			if err != nil {
				t.Fatalf("Unexpected error: %v", err)
			}
			if si.State.Msgs != 0 {
				return fmt.Errorf("Expected 0 msgs, got state: %+v", si.State)
			}
			return nil
		})
	}

	waitForZero()

	// Add in 50 messages.
	for i := 0; i < 50; i++ {
		if _, err = js.Publish("foo", []byte("more")); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}
	checkSubsPending(t, sub, 50)

	// Now delete the consumer and make sure the stream goes to zero.
	if err := js.DeleteConsumer("foo", "dlc"); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	waitForZero()
}

// TestJetStreamClusterWorkQueueRetention verifies that a clustered work-queue
// stream removes a message once it has been fetched and acked by its pull
// consumer. Regression test for:
// https://github.com/nats-io/nats-server/issues/2243
func TestJetStreamClusterWorkQueueRetention(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{
		Name:      "FOO",
		Subjects:  []string{"foo.*"},
		Replicas:  2,
		Retention: nats.WorkQueuePolicy,
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	sub, err := js.PullSubscribe("foo.test", "test")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	if _, err = js.Publish("foo.test", []byte("OK")); err != nil {
		t.Fatalf("Unexpected publish error: %v", err)
	}
	si, err := js.StreamInfo("FOO")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if si.State.Msgs != 1 {
		t.Fatalf("Expected 1 msg, got state: %+v", si.State)
	}

	// Fetch from our pull consumer and ack.
	for _, m := range fetchMsgs(t, sub, 1, 5*time.Second) {
		m.AckSync()
	}

	// Make sure the messages are removed.
	checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
		si, err := js.StreamInfo("FOO")
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		if si.State.Msgs != 0 {
			return fmt.Errorf("Expected 0 msgs, got state: %+v", si.State)
		}
		return nil
	})

}

// TestJetStreamClusterMirrorAndSourceWorkQueues verifies that mirrors and
// sourcing streams still receive messages from a work-queue origin stream
// even though the origin removes them once consumed.
func TestJetStreamClusterMirrorAndSourceWorkQueues(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "WQ", 3)
	defer c.shutdown()

	// Client for API requests.
	nc, js := jsClientConnect(t, c.randomServer())
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{
		Name:      "WQ22",
		Subjects:  []string{"foo"},
		Replicas:  2,
		Retention: nats.WorkQueuePolicy,
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	_, err = js.AddStream(&nats.StreamConfig{
		Name:     "M",
		Replicas: 2,
		Mirror:   &nats.StreamSource{Name: "WQ22"},
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	_, err = js.AddStream(&nats.StreamConfig{
		Name:     "S",
		Replicas: 2,
		Sources:  []*nats.StreamSource{{Name: "WQ22"}},
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Allow direct sync consumers to connect.
	time.Sleep(500 * time.Millisecond)

	if _, err = js.Publish("foo", []byte("ok")); err != nil {
		t.Fatalf("Unexpected publish error: %v", err)
	}

	// The work-queue origin should drain while the mirror/source keep a copy.
	checkFor(t, 5*time.Second, 250*time.Millisecond, func() error {
		if si, _ := js.StreamInfo("WQ22"); si.State.Msgs != 0 {
			return fmt.Errorf("Expected no msgs for %q, got %d", "WQ22", si.State.Msgs)
		}
		if si, _ := js.StreamInfo("M"); si.State.Msgs != 1 {
			return fmt.Errorf("Expected 1 msg for %q, got %d", "M", si.State.Msgs)
		}
		if si, _ := js.StreamInfo("S"); si.State.Msgs != 1 {
			return fmt.Errorf("Expected 1 msg for %q, got %d", "S", si.State.Msgs)
		}
		return nil
	})

}

// TestJetStreamClusterMirrorAndSourceInterestPolicyStream verifies that an
// interest-policy origin stream feeds its mirror and sourcing streams, both
// when no direct interest exists (origin drops immediately) and when a live
// subscriber creates interest (origin retains as well).
func TestJetStreamClusterMirrorAndSourceInterestPolicyStream(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "WQ", 3)
	defer c.shutdown()

	// Client for API requests.
	nc, js := jsClientConnect(t, c.randomServer())
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{
		Name:      "IP22",
		Subjects:  []string{"foo"},
		Replicas:  3,
		Retention: nats.InterestPolicy,
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	_, err = js.AddStream(&nats.StreamConfig{
		Name:     "M",
		Replicas: 2,
		Mirror:   &nats.StreamSource{Name: "IP22"},
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	_, err = js.AddStream(&nats.StreamConfig{
		Name:     "S",
		Replicas: 2,
		Sources:  []*nats.StreamSource{{Name: "IP22"}},
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Allow sync consumers to connect.
	time.Sleep(500 * time.Millisecond)

	if _, err = js.Publish("foo", []byte("ok")); err != nil {
		t.Fatalf("Unexpected publish error: %v", err)
	}

	checkFor(t, 5*time.Second, 250*time.Millisecond, func() error {
		// This one will be 0 since no other interest exists.
		if si, _ := js.StreamInfo("IP22"); si.State.Msgs != 0 {
			return fmt.Errorf("Expected no msgs for %q, got %d", "IP22", si.State.Msgs)
		}
		if si, _ := js.StreamInfo("M"); si.State.Msgs != 1 {
			return fmt.Errorf("Expected 1 msg for %q, got %d", "M", si.State.Msgs)
		}
		if si, _ := js.StreamInfo("S"); si.State.Msgs != 1 {
			return fmt.Errorf("Expected 1 msg for %q, got %d", "S", si.State.Msgs)
		}
		return nil
	})

	// Now create other interest on IP22.
	sub, err := js.SubscribeSync("foo")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer sub.Unsubscribe()
	// Allow consumer state to propagate.
	time.Sleep(500 * time.Millisecond)

	if _, err = js.Publish("foo", []byte("ok")); err != nil {
		t.Fatalf("Unexpected publish error: %v", err)
	}

	checkFor(t, 5*time.Second, 250*time.Millisecond, func() error {
		// With live interest on IP22 this second message is now retained there.
		if si, _ := js.StreamInfo("IP22"); si.State.Msgs != 1 {
			return fmt.Errorf("Expected 1 msg for %q, got %d", "IP22", si.State.Msgs)
		}
		if si, _ := js.StreamInfo("M"); si.State.Msgs != 2 {
			return fmt.Errorf("Expected 2 msgs for %q, got %d", "M", si.State.Msgs)
		}
		if si, _ := js.StreamInfo("S"); si.State.Msgs != 2 {
			return fmt.Errorf("Expected 2 msgs for %q, got %d", "S", si.State.Msgs)
		}
		return nil
	})
}

// TestJetStreamClusterInterestRetentionWithFilteredConsumers verifies that an
// interest-policy stream with multiple filtered (per-subject) consumers only
// removes messages once the interested consumer has acked them, and purges
// per-filter when consumers are deleted.
func TestJetStreamClusterInterestRetentionWithFilteredConsumers(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	nc, js := jsClientConnect(t, c.randomServer())
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"*"}, Retention: nats.InterestPolicy, Replicas: 3})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	fsub, err := js.SubscribeSync("foo", nats.Durable("d1"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer fsub.Unsubscribe()

	bsub, err := js.SubscribeSync("bar", nats.Durable("d2"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer bsub.Unsubscribe()

	msg := []byte("FILTERED")
	sendMsg := func(subj string) {
		t.Helper()
		if _, err = js.Publish(subj, msg); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}

	getAndAck := func(sub *nats.Subscription) {
		t.Helper()
		m, err := sub.NextMsg(time.Second)
		if err != nil {
			t.Fatalf("Unexpected error getting msg: %v", err)
		}
		m.AckSync()
	}
	// Separate context with a short MaxWait so stream-info polling stays snappy.
	jsq, err := nc.JetStream(nats.MaxWait(250 * time.Millisecond))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	checkState := func(expected uint64) {
		t.Helper()
		checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
			t.Helper()
			si, err := jsq.StreamInfo("TEST")
			if err != nil {
				t.Fatalf("Unexpected error: %v", err)
			}
			if si.State.Msgs != expected {
				return fmt.Errorf("Expected %d msgs, got %d", expected, si.State.Msgs)
			}
			return nil
		})
	}

	sendMsg("foo")
	checkState(1)
	getAndAck(fsub)
	checkState(0)
	sendMsg("bar")
	sendMsg("foo")
	checkState(2)
	getAndAck(bsub)
	checkState(1)
	getAndAck(fsub)
	checkState(0)

	// Now send a bunch of messages and then delete the consumer.
	for i := 0; i < 10; i++ {
		sendMsg("foo")
		sendMsg("bar")
	}
	checkState(20)

	if err := js.DeleteConsumer("TEST", "d1"); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if err := js.DeleteConsumer("TEST", "d2"); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	checkState(0)

	// Now make sure pull based consumers work same.
	if _, err := js.PullSubscribe("foo", "dlc"); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Now send a bunch of messages and then delete the consumer.
	// Only "foo" has interest now, so the "bar" messages are dropped on arrival.
	for i := 0; i < 10; i++ {
		sendMsg("foo")
		sendMsg("bar")
	}
	checkState(10)

	if err := js.DeleteConsumer("TEST", "dlc"); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	checkState(0)
}

// TestJetStreamClusterEphemeralConsumerNoImmediateInterest verifies that an
// ephemeral push consumer created without an active subscription on its
// deliver subject is eventually cleaned up by the inactivity timer.
func TestJetStreamClusterEphemeralConsumerNoImmediateInterest(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 3})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// We want to relax the strict interest requirement.
	ci, err := js.AddConsumer("TEST", &nats.ConsumerConfig{DeliverSubject: "r"})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Reach into the consumer leader's server internals to shorten the
	// inactivity delete threshold for this test.
	cl := c.consumerLeader("$G", "TEST", ci.Name)
	mset, err := cl.GlobalAccount().lookupStream("TEST")
	if err != nil {
		t.Fatalf("Expected to find a stream for %q", "TEST")
	}
	o := mset.lookupConsumer(ci.Name)
	if o == nil {
		t.Fatalf("Error looking up consumer %q", ci.Name)
	}
	o.setInActiveDeleteThreshold(500 * time.Millisecond)

	// Make sure the consumer does eventually go away.
	// Should be 5 seconds wait.
	checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
		if _, err := js.ConsumerInfo("TEST", ci.Name); err != nil {
			return nil
		}
		return fmt.Errorf("Consumer still present")
	})
}

// TestJetStreamClusterEphemeralConsumerCleanup verifies that unsubscribing an
// ephemeral consumer removes it from the cluster once its inactivity
// threshold elapses.
func TestJetStreamClusterEphemeralConsumerCleanup(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{Name: "foo", Replicas: 2})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	sub, err := js.Subscribe("foo", func(m *nats.Msg) {})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	ci, _ := sub.ConsumerInfo()
	if ci == nil {
		t.Fatalf("Unexpected error: no consumer info")
	}

	// We will look up by hand this consumer to set inactive threshold lower for this test.
	cl := c.consumerLeader("$G", "foo", ci.Name)
	if cl == nil {
		t.Fatalf("Could not find consumer leader")
	}
	mset, err := cl.GlobalAccount().lookupStream("foo")
	if err != nil {
		t.Fatalf("Expected to find a stream for %q", "foo")
	}
	o := mset.lookupConsumer(ci.Name)
	if o == nil {
		t.Fatalf("Error looking up consumer %q", ci.Name)
	}
	o.setInActiveDeleteThreshold(10 * time.Millisecond)

	msg, toSend := []byte("Hello JS Clustering"), 10
	for i := 0; i < toSend; i++ {
		if _, err = js.Publish("foo", msg); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}

	getConsumers := func() []string {
		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
		defer cancel()

		var names []string
		for name := range js.ConsumerNames("foo", nats.Context(ctx)) {
			names = append(names, name)
		}
		return names
	}

	checkConsumer := func(expected int) {
		consumers := getConsumers()
		if len(consumers) != expected {
			t.Fatalf("Expected %d consumers but got %d", expected, len(consumers))
		}
	}

	checkConsumer(1)

	// Now Unsubscribe, since this is ephemeral this will make this go away.
	sub.Unsubscribe()

	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
		if consumers := getConsumers(); len(consumers) == 0 {
			return nil
		} else {
			return fmt.Errorf("Still %d consumers remaining", len(consumers))
		}
	})
}

// TestJetStreamClusterEphemeralConsumersNotReplicated verifies that ephemeral
// consumers are created with R=1 (no replicas) even on a replicated stream,
// and exercises behavior when the server hosting the ephemeral shuts down.
func TestJetStreamClusterEphemeralConsumersNotReplicated(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{Name: "foo", Replicas: 3})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	sub, err := js.SubscribeSync("foo")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	ci, _ := sub.ConsumerInfo()
	if ci == nil {
		t.Fatalf("Unexpected error: no consumer info")
	}

	if _, err = js.Publish("foo", []byte("OK")); err != nil {
		t.Fatalf("Unexpected publish error: %v", err)
	}
	checkSubsPending(t, sub, 1)
	sub.NextMsg(0)

	if ci.Cluster == nil || len(ci.Cluster.Replicas) != 0 {
		t.Fatalf("Expected ephemeral to be R=1, got %+v", ci.Cluster)
	}
	scl := c.serverByName(ci.Cluster.Leader)
	if scl == nil {
		t.Fatalf("Could not select server where ephemeral consumer is running")
	}

	// Test migrations. If we are also metadata leader will not work so skip.
	if scl == c.leader() {
		return
	}

	scl.Shutdown()
	c.waitOnStreamLeader("$G", "foo")

	if _, err = js.Publish("foo", []byte("OK")); err != nil {
		t.Fatalf("Unexpected publish error: %v", err)
	}

	// Ephemeral state is not replicated, so delivery after the host server
	// went down is best-effort only — log rather than fail.
	if _, err := sub.NextMsg(500 * time.Millisecond); err != nil {
		t.Logf("Expected to see another message, but behavior is optimistic so can fail")
	}
}

// TestJetStreamClusterUserSnapshotAndRestore verifies the user-facing stream
// snapshot and restore API in a cluster: snapshotting a stream with consumer
// state, restoring it (including the failure case of restoring over an
// existing stream), and checking that messages, replicas, and consumer state
// (delivered/ack floors, redeliveries) all survive the round trip.
func TestJetStreamClusterUserSnapshotAndRestore(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"foo"},
		Replicas: 2,
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	toSend := 200

	for i := 0; i < toSend; i++ {
		if _, err = js.Publish("foo", []byte("OK")); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}

	// Create consumer with no state.
	_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "rip", AckPolicy: nats.AckExplicitPolicy})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Create another consumer as well and give it a non-simplistic state.
	_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy, AckWait: 10 * time.Second})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	jsub, err := js.PullSubscribe("foo", "dlc")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Ack first 50.
	for _, m := range fetchMsgs(t, jsub, 50, 5*time.Second) {
		m.AckSync()
	}
	// Now ack every third message for next 50.
	for i, m := range fetchMsgs(t, jsub, 50, 5*time.Second) {
		if i%3 == 0 {
			m.AckSync()
		}
	}

	// Snapshot consumer info.
	ci, err := jsub.ConsumerInfo()
	if err != nil {
		t.Fatalf("Unexpected error getting consumer info: %v", err)
	}

	sreq := &JSApiStreamSnapshotRequest{
		DeliverSubject: nats.NewInbox(),
		ChunkSize:      512,
	}

	req, _ := json.Marshal(sreq)
	rmsg, err := nc.Request(fmt.Sprintf(JSApiStreamSnapshotT, "TEST"), req, time.Second)
	if err != nil {
		t.Fatalf("Unexpected error on snapshot request: %v", err)
	}

	var resp JSApiStreamSnapshotResponse
	json.Unmarshal(rmsg.Data, &resp)
	if resp.Error != nil {
		t.Fatalf("Did not get correct error response: %+v", resp.Error)
	}

	// Grab state for comparison.
	state := *resp.State
	config := *resp.Config

	var snapshot []byte
	done := make(chan bool)

	// Collect snapshot chunks delivered to our inbox; an empty message is EOF.
	sub, _ := nc.Subscribe(sreq.DeliverSubject, func(m *nats.Msg) {
		// EOF
		if len(m.Data) == 0 {
			done <- true
			return
		}
		// Could be writing to a file here too.
		snapshot = append(snapshot, m.Data...)
		// Flow ack
		m.Respond(nil)
	})
	defer sub.Unsubscribe()

	// Wait to receive the snapshot.
	select {
	case <-done:
	case <-time.After(5 * time.Second):
		t.Fatalf("Did not receive our snapshot in time")
	}

	var rresp JSApiStreamRestoreResponse
	rreq := &JSApiStreamRestoreRequest{
		Config: config,
		State:  state,
	}
	req, _ = json.Marshal(rreq)

	// Make sure a restore to an existing stream fails.
	rmsg, err = nc.Request(fmt.Sprintf(JSApiStreamRestoreT, "TEST"), req, 2*time.Second)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	json.Unmarshal(rmsg.Data, &rresp)
	if !IsNatsErr(rresp.Error, JSStreamNameExistRestoreFailedErr) {
		t.Fatalf("Did not get correct error response: %+v", rresp.Error)
	}

	if _, err := js.StreamInfo("TEST"); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Now make sure a restore will work.
	// Delete our stream first.
	if err := js.DeleteStream("TEST"); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if _, err := js.StreamInfo("TEST"); err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("Expected not found error: %v", err)
	}

	// This should work properly.
	rmsg, err = nc.Request(fmt.Sprintf(JSApiStreamRestoreT, "TEST"), req, 5*time.Second)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	rresp.Error = nil
	json.Unmarshal(rmsg.Data, &rresp)
	if rresp.Error != nil {
		t.Fatalf("Got an unexpected error response: %+v", rresp.Error)
	}
	if rresp.DeliverSubject == _EMPTY_ {
		t.Fatalf("No deliver subject set on response: %+v", rresp)
	}
	// Send our snapshot back in to restore the stream.
	// Can be any size message.
	var chunk [1024]byte
	for r := bytes.NewReader(snapshot); ; {
		n, err := r.Read(chunk[:])
		if err != nil {
			break
		}
		nc.Request(rresp.DeliverSubject, chunk[:n], time.Second)
	}
	// An empty request signals EOF to the restore endpoint.
	rmsg, err = nc.Request(rresp.DeliverSubject, nil, 2*time.Second)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	rresp.Error = nil
	json.Unmarshal(rmsg.Data, &rresp)
	if rresp.Error != nil {
		t.Fatalf("Got an unexpected error response: %+v", rresp.Error)
	}

	si, err := js.StreamInfo("TEST")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if si == nil || si.Config.Name != "TEST" || si.State.Msgs != uint64(toSend) {
		t.Fatalf("StreamInfo is not correct %+v", si)
	}

	// Make sure the replicas become current eventually. They will be doing catchup.
	checkFor(t, 10*time.Second, 100*time.Millisecond, func() error {
		si, _ := js.StreamInfo("TEST")
		if si == nil || si.Cluster == nil {
			t.Fatalf("Did not get stream info")
		}
		for _, pi := range si.Cluster.Replicas {
			if !pi.Current {
				return fmt.Errorf("Peer not current: %+v", pi)
			}
		}
		return nil
	})

	// Wait on the system to elect a leader for the restored consumer.
	c.waitOnConsumerLeader("$G", "TEST", "dlc")

	// Now check for the consumer being recreated.
	nci, err := js.ConsumerInfo("TEST", "dlc")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// nil out timestamp for better comparison
	nci.Delivered.Last, ci.Delivered.Last = nil, nil
	if nci.Delivered != ci.Delivered {
		t.Fatalf("Delivered states do not match %+v vs %+v", nci.Delivered, ci.Delivered)
	}
	nci.AckFloor.Last, ci.AckFloor.Last = nil, nil
	if nci.AckFloor != ci.AckFloor {
		t.Fatalf("Ack floors did not match %+v vs %+v", nci.AckFloor, ci.AckFloor)
	}

	// Make sure consumer works.
	// It should pick up with the next delivery spot, so check for that as first message.
	// We should have all the messages for first delivery delivered.
	wantSeq := 101
	for _, m := range fetchMsgs(t, jsub, 100, 5*time.Second) {
		meta, err := m.Metadata()
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		if meta.Sequence.Stream != uint64(wantSeq) {
			t.Fatalf("Expected stream sequence of %d, but got %d", wantSeq, meta.Sequence.Stream)
		}
		m.AckSync()
		wantSeq++
	}

	// Check that redelivered come in now..
	redelivered := 50/3 + 1
	fetchMsgs(t, jsub, redelivered, 15*time.Second)

	// Now make sure the other server was properly caught up.
	// Need to call this by hand for now.
	rmsg, err = nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "TEST"), nil, time.Second)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	var sdResp JSApiStreamLeaderStepDownResponse
	if err := json.Unmarshal(rmsg.Data, &sdResp); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if sdResp.Error != nil {
		t.Fatalf("Unexpected error: %+v", sdResp.Error)
	}

	c.waitOnStreamLeader("$G", "TEST")
	si, err = js.StreamInfo("TEST")
	if err != nil {
		t.Fatalf("Unexpected error: %+v", err)
	}
	if si.State.Msgs != uint64(toSend) {
		t.Fatalf("Unexpected stream info: %+v", si)
	}

	// Check idle consumer
	c.waitOnConsumerLeader("$G", "TEST", "rip")

	// Now check for the consumer being recreated.
	if _, err := js.ConsumerInfo("TEST", "rip"); err != nil {
		t.Fatalf("Unexpected error: %+v", err)
	}
}

// TestJetStreamClusterUserSnapshotAndRestoreConfigChanges verifies that a
// stream snapshot can be restored under a modified configuration — different
// subjects, storage backend, and replica count.
func TestJetStreamClusterUserSnapshotAndRestoreConfigChanges(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	nc, js := jsClientConnect(t, c.randomServer())
	defer nc.Close()

	// FIXME(dlc) - Do case with R=1
	cfg := &nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"foo"},
		Replicas: 2,
	}

	if _, err := js.AddStream(cfg); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	toSend := 10
	for i := 0; i < toSend; i++ {
		if _, err := js.Publish("foo", []byte("OK")); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}

	// getSnapshot captures the stream's data chunks and state via the
	// snapshot API (empty message on the deliver subject signals EOF).
	getSnapshot := func() ([]byte, *StreamState) {
		t.Helper()
		sreq := &JSApiStreamSnapshotRequest{
			DeliverSubject: nats.NewInbox(),
			ChunkSize:      1024,
		}

		req, _ := json.Marshal(sreq)
		rmsg, err := nc.Request(fmt.Sprintf(JSApiStreamSnapshotT, "TEST"), req, time.Second)
		if err != nil {
			t.Fatalf("Unexpected error on snapshot request: %v", err)
		}

		var resp JSApiStreamSnapshotResponse
		json.Unmarshal(rmsg.Data, &resp)
		if resp.Error != nil {
			t.Fatalf("Did not get correct error response: %+v", resp.Error)
		}

		var snapshot []byte
		done := make(chan bool)

		sub, _ := nc.Subscribe(sreq.DeliverSubject, func(m *nats.Msg) {
			// EOF
			if len(m.Data) == 0 {
				done <- true
				return
			}
			// Could be writing to a file here too.
			snapshot = append(snapshot, m.Data...)
			// Flow ack
			m.Respond(nil)
		})
		defer sub.Unsubscribe()

		// Wait to receive the snapshot.
		select {
		case <-done:
		case <-time.After(5 * time.Second):
			t.Fatalf("Did not receive our snapshot in time")
		}
		return snapshot, resp.State
	}

	// restore replays a snapshot under the given (possibly altered) config
	// and returns the resulting stream info.
	restore := func(cfg *StreamConfig, state *StreamState, snap []byte) *nats.StreamInfo {
		rreq := &JSApiStreamRestoreRequest{
			Config: *cfg,
			State:  *state,
		}
		req, err := json.Marshal(rreq)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		rmsg, err := nc.Request(fmt.Sprintf(JSApiStreamRestoreT, cfg.Name), req, 5*time.Second)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		var rresp JSApiStreamRestoreResponse
		json.Unmarshal(rmsg.Data, &rresp)
		if rresp.Error != nil {
			t.Fatalf("Got an unexpected error response: %+v", rresp.Error)
		}
		if rresp.DeliverSubject == _EMPTY_ {
			t.Fatalf("No deliver subject set on response: %+v", rresp)
		}
		// Send our snapshot back in to restore the stream.
		// Can be any size message.
		var chunk [1024]byte
		for r := bytes.NewReader(snap); ; {
			n, err := r.Read(chunk[:])
			if err != nil {
				break
			}
			nc.Request(rresp.DeliverSubject, chunk[:n], time.Second)
		}
		rmsg, err = nc.Request(rresp.DeliverSubject, nil, 2*time.Second)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		rresp.Error = nil
		json.Unmarshal(rmsg.Data, &rresp)
		if rresp.Error != nil {
			t.Fatalf("Got an unexpected error response: %+v", rresp.Error)
		}
		si, err := js.StreamInfo(cfg.Name)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		return si
	}

	snap, state := getSnapshot()

	if err := js.DeleteStream("TEST"); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Now change subjects.
	ncfg := &StreamConfig{
		Name:     "TEST",
		Subjects: []string{"bar", "baz"},
		Storage:  FileStorage,
		Replicas: 2,
	}
	if si := restore(ncfg, state, snap); !reflect.DeepEqual(si.Config.Subjects, ncfg.Subjects) {
		t.Fatalf("Did not get expected stream info: %+v", si)
	}
	if err := js.DeleteStream("TEST"); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Storage
	ncfg.Storage = MemoryStorage
	if si := restore(ncfg, state, snap); !reflect.DeepEqual(si.Config.Subjects, ncfg.Subjects) {
		t.Fatalf("Did not get expected stream info: %+v", si)
	}
	if err := js.DeleteStream("TEST"); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Now replicas
	ncfg.Replicas = 3
	if si := restore(ncfg, state, snap); !reflect.DeepEqual(si.Config.Subjects, ncfg.Subjects) {
		t.Fatalf("Did not get expected stream info: %+v", si)
	}
}

// TestJetStreamClusterAccountInfoAndLimits verifies account-level usage
// accounting and limit enforcement (streams, storage, consumers) across a
// five-node cluster with mixed replication factors.
func TestJetStreamClusterAccountInfoAndLimits(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R5S", 5)
	defer c.shutdown()

	// Adjust our limits.
	c.updateLimits("$G", map[string]JetStreamAccountLimits{
		_EMPTY_: {
			MaxMemory:    1024,
			MaxStore:     8000,
			MaxStreams:   3,
			MaxConsumers: 1,
		},
	})

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	if _, err := js.AddStream(&nats.StreamConfig{Name: "foo", Replicas: 1}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if _, err := js.AddStream(&nats.StreamConfig{Name: "bar", Replicas: 2}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if _, err := js.AddStream(&nats.StreamConfig{Name: "baz", Replicas: 3}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	sendBatch := func(subject string, n int) {
		t.Helper()
		for i := 0; i < n; i++ {
			if _, err := js.Publish(subject, []byte("JSC-OK")); err != nil {
				t.Fatalf("Unexpected publish error: %v", err)
			}
		}
	}

	sendBatch("foo", 25)
	sendBatch("bar", 75)
	sendBatch("baz", 10)

	accountStats := func() *nats.AccountInfo {
		t.Helper()

		info, err := js.AccountInfo()
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		return info
	}

	// Per-message stored size used below: header overhead + subject + payload
	// + extra. If subject is not 3 letters or payload not 2 this needs to change.
	const msgSize = uint64(22 + 3 + 6 + 8)

	stats := accountStats()
	if stats.Streams != 3 {
		t.Fatalf("Should have been tracking 3 streams, found %d", stats.Streams)
	}
	// Storage usage counts each replica, hence the R multipliers.
	expectedSize := 25*msgSize + 75*msgSize*2 + 10*msgSize*3
	// This may lag.
	checkFor(t, 5*time.Second, 500*time.Millisecond, func() error {
		if stats.Store != expectedSize {
			err := fmt.Errorf("Expected store size to be %d, got %+v\n", expectedSize, stats)
			stats = accountStats()
			return err

		}
		return nil
	})

	// Check limit enforcement.
	if _, err := js.AddStream(&nats.StreamConfig{Name: "fail", Replicas: 3}); err == nil {
		t.Fatalf("Expected an error but got none")
	}

	// We should be at 7995 at the moment with a limit of 8000, so any message will go over.
	if _, err := js.Publish("baz", []byte("JSC-NOT-OK")); err == nil {
		t.Fatalf("Expected publish error but got none")
	}

	// Check consumers
	_, err := js.AddConsumer("foo", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// This should fail.
	_, err = js.AddConsumer("foo", &nats.ConsumerConfig{Durable: "dlc22", AckPolicy: nats.AckExplicitPolicy})
	if err == nil {
		t.Fatalf("Expected error but got none")
	}
}

// TestJetStreamClusterStreamLimits verifies per-stream limits in a cluster:
// replica count beyond cluster capability, max message size, max messages
// with DiscardNew, and MaxAge-based expiry freeing capacity.
func TestJetStreamClusterStreamLimits(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	// Check that large R will fail.
	if _, err := js.AddStream(&nats.StreamConfig{Name: "foo", Replicas: 5}); err == nil {
		t.Fatalf("Expected error but got none")
	}

	maxMsgs := 5

	_, err := js.AddStream(&nats.StreamConfig{
		Name:       "foo",
		Replicas:   3,
		Retention:  nats.LimitsPolicy,
		Discard:    DiscardNew,
		MaxMsgSize: 11,
		MaxMsgs:    int64(maxMsgs),
		MaxAge:     250 * time.Millisecond,
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Large message should fail.
	if _, err := js.Publish("foo", []byte("0123456789ZZZ")); err == nil {
		t.Fatalf("Expected publish to fail")
	}

	for i := 0; i < maxMsgs; i++ {
		if _, err := js.Publish("foo", []byte("JSC-OK")); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}

	// These should fail.
	if _, err := js.Publish("foo", []byte("JSC-OK")); err == nil {
		t.Fatalf("Expected publish to fail")
	}

	// Make sure when space frees up we can send more.
	// MaxAge (250ms) should expire the stored messages.
	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
		si, err := js.StreamInfo("foo")
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		if si.State.Msgs != 0 {
			return fmt.Errorf("Expected 0 msgs, got state: %+v", si.State)
		}
		return nil
	})

	if _, err := js.Publish("foo", []byte("ROUND2")); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
}

// TestJetStreamClusterStreamInterestOnlyPolicy verifies interest-policy
// semantics on a replicated stream: publishes with no consumers are no-ops,
// messages are retained while a durable exists, and removing the durable
// purges the stream.
func TestJetStreamClusterStreamInterestOnlyPolicy(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{
		Name:      "foo",
		Replicas:  3,
		Retention: nats.InterestPolicy,
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	toSend := 10

	// With no interest these should be no-ops.
	for i := 0; i < toSend; i++ {
		if _, err := js.Publish("foo", []byte("JSC-OK")); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}

	si, err := js.StreamInfo("foo")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if si.State.Msgs != 0 {
		t.Fatalf("Expected no messages with no interest, got %d", si.State.Msgs)
	}

	// Now create a consumer.
	sub, err := js.SubscribeSync("foo", nats.Durable("dlc"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	for i := 0; i < toSend; i++ {
		if _, err := js.Publish("foo", []byte("JSC-OK")); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}
	checkSubsPending(t, sub, toSend)

	si, err = js.StreamInfo("foo")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if si.State.Msgs != uint64(toSend) {
		t.Fatalf("Expected %d messages with interest, got %d", toSend, si.State.Msgs)
	}
	// Sequences continue from the earlier no-interest publishes.
	if si.State.FirstSeq != uint64(toSend+1) {
		t.Fatalf("Expected first sequence of %d, got %d", toSend+1, si.State.FirstSeq)
	}

	// Now delete the consumer.
	sub.Unsubscribe()
	// That should make it go away.
	if _, err := js.ConsumerInfo("foo", "dlc"); err == nil {
		t.Fatalf("Expected not found error, got none")
	}

	// Wait for the messages to be purged.
	checkFor(t, 5*time.Second, 20*time.Millisecond, func() error {
		si, err := js.StreamInfo("foo")
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		if si.State.Msgs == 0 {
			return nil
		}
		return fmt.Errorf("Wanted 0 messages, got %d", si.State.Msgs)
	})
}

// These are disabled for now.
3312 func TestJetStreamClusterStreamTemplates(t *testing.T) { 3313 c := createJetStreamClusterExplicit(t, "R3S", 3) 3314 defer c.shutdown() 3315 3316 // Client based API 3317 s := c.randomServer() 3318 nc, _ := jsClientConnect(t, s) 3319 defer nc.Close() 3320 3321 // List API 3322 var tListResp JSApiStreamTemplateNamesResponse 3323 resp, err := nc.Request(JSApiTemplates, nil, time.Second) 3324 if err != nil { 3325 t.Fatalf("Unexpected error: %v", err) 3326 } 3327 if err := json.Unmarshal(resp.Data, &tListResp); err != nil { 3328 t.Fatalf("Unexpected error: %v", err) 3329 } 3330 if tListResp.Error == nil { 3331 t.Fatalf("Expected an unsupported error, got none") 3332 } 3333 if !strings.Contains(tListResp.Error.Description, "not currently supported in clustered mode") { 3334 t.Fatalf("Did not get correct error response: %+v", tListResp.Error) 3335 } 3336 3337 // Create 3338 // Now do templates. 3339 mcfg := &StreamConfig{ 3340 Subjects: []string{"kv.*"}, 3341 Storage: MemoryStorage, 3342 } 3343 template := &StreamTemplateConfig{ 3344 Name: "kv", 3345 Config: mcfg, 3346 MaxStreams: 4, 3347 } 3348 req, err := json.Marshal(template) 3349 if err != nil { 3350 t.Fatalf("Unexpected error: %v", err) 3351 } 3352 var stResp JSApiStreamTemplateCreateResponse 3353 resp, err = nc.Request(fmt.Sprintf(JSApiTemplateCreateT, template.Name), req, time.Second) 3354 if err != nil { 3355 t.Fatalf("Unexpected error: %v", err) 3356 } 3357 if err = json.Unmarshal(resp.Data, &stResp); err != nil { 3358 t.Fatalf("Unexpected error: %v", err) 3359 } 3360 if stResp.Error == nil { 3361 t.Fatalf("Expected an unsupported error, got none") 3362 } 3363 if !strings.Contains(stResp.Error.Description, "not currently supported in clustered mode") { 3364 t.Fatalf("Did not get correct error response: %+v", stResp.Error) 3365 } 3366 } 3367 3368 func TestJetStreamClusterExtendedAccountInfo(t *testing.T) { 3369 c := createJetStreamClusterExplicit(t, "R3S", 3) 3370 defer c.shutdown() 3371 3372 // Client based 
API 3373 s := c.randomServer() 3374 nc, js := jsClientConnect(t, s) 3375 defer nc.Close() 3376 3377 sendBatch := func(subject string, n int) { 3378 t.Helper() 3379 for i := 0; i < n; i++ { 3380 if _, err := js.Publish(subject, []byte("JSC-OK")); err != nil { 3381 t.Fatalf("Unexpected publish error: %v", err) 3382 } 3383 } 3384 } 3385 3386 // Add in some streams with msgs and consumers. 3387 if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST-1", Replicas: 2}); err != nil { 3388 t.Fatalf("Unexpected error: %v", err) 3389 } 3390 if _, err := js.SubscribeSync("TEST-1"); err != nil { 3391 t.Fatalf("Unexpected error: %v", err) 3392 } 3393 sendBatch("TEST-1", 25) 3394 3395 if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST-2", Replicas: 2}); err != nil { 3396 t.Fatalf("Unexpected error: %v", err) 3397 } 3398 if _, err := js.SubscribeSync("TEST-2"); err != nil { 3399 t.Fatalf("Unexpected error: %v", err) 3400 } 3401 sendBatch("TEST-2", 50) 3402 3403 if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST-3", Replicas: 3, Storage: nats.MemoryStorage}); err != nil { 3404 t.Fatalf("Unexpected error: %v", err) 3405 } 3406 if _, err := js.SubscribeSync("TEST-3"); err != nil { 3407 t.Fatalf("Unexpected error: %v", err) 3408 } 3409 sendBatch("TEST-3", 100) 3410 3411 // Go client will lag so use direct for now. 3412 getAccountInfo := func() *nats.AccountInfo { 3413 t.Helper() 3414 3415 info, err := js.AccountInfo() 3416 if err != nil { 3417 t.Fatalf("Unexpected error: %v", err) 3418 } 3419 return info 3420 } 3421 3422 // Wait to accumulate. 3423 time.Sleep(500 * time.Millisecond) 3424 3425 ai := getAccountInfo() 3426 if ai.Streams != 3 || ai.Consumers != 3 { 3427 t.Fatalf("AccountInfo not correct: %+v", ai) 3428 } 3429 if ai.API.Total < 7 { 3430 t.Fatalf("Expected at least 7 total API calls, got %d", ai.API.Total) 3431 } 3432 3433 // Now do a failure to make sure we track API errors. 
3434 js.StreamInfo("NO-STREAM") 3435 js.ConsumerInfo("TEST-1", "NO-CONSUMER") 3436 js.ConsumerInfo("TEST-2", "NO-CONSUMER") 3437 js.ConsumerInfo("TEST-3", "NO-CONSUMER") 3438 3439 ai = getAccountInfo() 3440 if ai.API.Errors != 4 { 3441 t.Fatalf("Expected 4 API calls to be errors, got %d", ai.API.Errors) 3442 } 3443 } 3444 3445 func TestJetStreamClusterPeerRemovalAPI(t *testing.T) { 3446 c := createJetStreamClusterExplicit(t, "R5S", 5) 3447 defer c.shutdown() 3448 3449 // Client based API 3450 ml := c.leader() 3451 nc, err := nats.Connect(ml.ClientURL(), nats.UserInfo("admin", "s3cr3t!")) 3452 if err != nil { 3453 t.Fatalf("Failed to create system client: %v", err) 3454 } 3455 defer nc.Close() 3456 3457 // Expect error if unknown peer 3458 req := &JSApiMetaServerRemoveRequest{Server: "S-9"} 3459 jsreq, err := json.Marshal(req) 3460 if err != nil { 3461 t.Fatalf("Unexpected error: %v", err) 3462 } 3463 rmsg, err := nc.Request(JSApiRemoveServer, jsreq, time.Second) 3464 if err != nil { 3465 t.Fatalf("Unexpected error: %v", err) 3466 } 3467 var resp JSApiMetaServerRemoveResponse 3468 if err := json.Unmarshal(rmsg.Data, &resp); err != nil { 3469 t.Fatalf("Unexpected error: %v", err) 3470 } 3471 if resp.Error == nil { 3472 t.Fatalf("Expected an error, got none") 3473 } 3474 3475 sub, err := nc.SubscribeSync(JSAdvisoryServerRemoved) 3476 if err != nil { 3477 t.Fatalf("Unexpected error: %v", err) 3478 } 3479 3480 rs := c.randomNonLeader() 3481 req = &JSApiMetaServerRemoveRequest{Server: rs.Name()} 3482 jsreq, err = json.Marshal(req) 3483 if err != nil { 3484 t.Fatalf("Unexpected error: %v", err) 3485 } 3486 rmsg, err = nc.Request(JSApiRemoveServer, jsreq, time.Second) 3487 if err != nil { 3488 t.Fatalf("Unexpected error: %v", err) 3489 } 3490 resp.Error = nil 3491 if err := json.Unmarshal(rmsg.Data, &resp); err != nil { 3492 t.Fatalf("Unexpected error: %v", err) 3493 } 3494 if resp.Error != nil { 3495 t.Fatalf("Unexpected error: %+v", resp.Error) 3496 } 3497 
c.waitOnLeader() 3498 ml = c.leader() 3499 3500 checkSubsPending(t, sub, 1) 3501 madv, _ := sub.NextMsg(0) 3502 var adv JSServerRemovedAdvisory 3503 if err := json.Unmarshal(madv.Data, &adv); err != nil { 3504 t.Fatalf("Unexpected error: %v", err) 3505 } 3506 if adv.Server != rs.Name() { 3507 t.Fatalf("Expected advisory about %s being removed, got %+v", rs.Name(), adv) 3508 } 3509 3510 checkFor(t, 5*time.Second, 250*time.Millisecond, func() error { 3511 for _, s := range ml.JetStreamClusterPeers() { 3512 if s == rs.Name() { 3513 return fmt.Errorf("Still in the peer list") 3514 } 3515 } 3516 return nil 3517 }) 3518 } 3519 3520 func TestJetStreamClusterPeerRemovalAndStreamReassignment(t *testing.T) { 3521 c := createJetStreamClusterExplicit(t, "R5S", 5) 3522 defer c.shutdown() 3523 3524 // Client based API 3525 s := c.randomNonLeader() 3526 nc, js := jsClientConnect(t, s) 3527 defer nc.Close() 3528 3529 si, err := js.AddStream(&nats.StreamConfig{ 3530 Name: "TEST", 3531 Subjects: []string{"foo", "bar"}, 3532 Replicas: 3, 3533 }) 3534 if err != nil { 3535 t.Fatalf("Unexpected error: %v", err) 3536 } 3537 3538 // Admin based API 3539 ml := c.leader() 3540 nc, err = nats.Connect(ml.ClientURL(), nats.UserInfo("admin", "s3cr3t!")) 3541 if err != nil { 3542 t.Fatalf("Failed to create system client: %v", err) 3543 } 3544 defer nc.Close() 3545 3546 // Select the non-leader server for the stream to remove. 
3547 if len(si.Cluster.Replicas) < 2 { 3548 t.Fatalf("Not enough replicas found: %+v", si.Cluster) 3549 } 3550 toRemove, cl := si.Cluster.Replicas[0].Name, c.leader() 3551 if toRemove == cl.Name() { 3552 toRemove = si.Cluster.Replicas[1].Name 3553 } 3554 3555 req := &JSApiMetaServerRemoveRequest{Server: toRemove} 3556 jsreq, err := json.Marshal(req) 3557 if err != nil { 3558 t.Fatalf("Unexpected error: %v", err) 3559 } 3560 rmsg, err := nc.Request(JSApiRemoveServer, jsreq, time.Second) 3561 if err != nil { 3562 t.Fatalf("Unexpected error: %v", err) 3563 } 3564 var resp JSApiMetaServerRemoveResponse 3565 if err := json.Unmarshal(rmsg.Data, &resp); err != nil { 3566 t.Fatalf("Unexpected error: %v", err) 3567 } 3568 if resp.Error != nil { 3569 t.Fatalf("Unexpected error: %+v", resp.Error) 3570 } 3571 // In case that server was also meta-leader. 3572 c.waitOnLeader() 3573 3574 checkFor(t, 10*time.Second, 250*time.Millisecond, func() error { 3575 for _, s := range ml.JetStreamClusterPeers() { 3576 if s == toRemove { 3577 return fmt.Errorf("Server still in the peer list") 3578 } 3579 } 3580 return nil 3581 }) 3582 3583 // Now wait until the stream is now current. 3584 checkFor(t, 10*time.Second, 100*time.Millisecond, func() error { 3585 si, err := js.StreamInfo("TEST", nats.MaxWait(time.Second)) 3586 if err != nil { 3587 return fmt.Errorf("Could not fetch stream info: %v", err) 3588 } 3589 // We should not see the old server at all. 
3590 for _, p := range si.Cluster.Replicas { 3591 if p.Name == toRemove { 3592 t.Fatalf("Peer not removed yet: %+v", toRemove) 3593 } 3594 if !p.Current { 3595 return fmt.Errorf("Expected replica to be current: %+v", p) 3596 } 3597 } 3598 if len(si.Cluster.Replicas) != 2 { 3599 return fmt.Errorf("Expected 2 replicas, got %d", len(si.Cluster.Replicas)) 3600 } 3601 return nil 3602 }) 3603 } 3604 3605 func TestJetStreamClusterPeerRemovalAndStreamReassignmentWithoutSpace(t *testing.T) { 3606 c := createJetStreamClusterExplicit(t, "R3S", 3) 3607 defer c.shutdown() 3608 3609 // Client based API 3610 s := c.randomNonLeader() 3611 nc, js := jsClientConnect(t, s) 3612 defer nc.Close() 3613 3614 si, err := js.AddStream(&nats.StreamConfig{ 3615 Name: "TEST", 3616 Subjects: []string{"foo", "bar"}, 3617 Replicas: 3, 3618 }) 3619 if err != nil { 3620 t.Fatalf("Unexpected error: %v", err) 3621 } 3622 3623 // Admin based API 3624 ml := c.leader() 3625 nc, err = nats.Connect(ml.ClientURL(), nats.UserInfo("admin", "s3cr3t!")) 3626 if err != nil { 3627 t.Fatalf("Failed to create system client: %v", err) 3628 } 3629 defer nc.Close() 3630 3631 // Select the non-leader server for the stream to remove. 
3632 if len(si.Cluster.Replicas) < 2 { 3633 t.Fatalf("Not enough replicas found: %+v", si.Cluster) 3634 } 3635 toRemove, cl := si.Cluster.Replicas[0].Name, c.leader() 3636 if toRemove == cl.Name() { 3637 toRemove = si.Cluster.Replicas[1].Name 3638 } 3639 3640 req := &JSApiMetaServerRemoveRequest{Server: toRemove} 3641 jsreq, err := json.Marshal(req) 3642 if err != nil { 3643 t.Fatalf("Unexpected error: %v", err) 3644 } 3645 rmsg, err := nc.Request(JSApiRemoveServer, jsreq, 2*time.Second) 3646 if err != nil { 3647 t.Fatalf("Unexpected error: %v", err) 3648 } 3649 3650 var resp JSApiMetaServerRemoveResponse 3651 if err := json.Unmarshal(rmsg.Data, &resp); err != nil { 3652 t.Fatalf("Unexpected error: %v", err) 3653 } 3654 if resp.Error != nil { 3655 t.Fatalf("Unexpected error: %+v", resp.Error) 3656 } 3657 checkFor(t, 10*time.Second, 250*time.Millisecond, func() error { 3658 for _, s := range ml.JetStreamClusterPeers() { 3659 if s == toRemove { 3660 return fmt.Errorf("Server still in the peer list") 3661 } 3662 } 3663 return nil 3664 }) 3665 // Make sure only 2 peers at this point. 3666 c.waitOnPeerCount(2) 3667 3668 // Now wait until the stream is now current. 3669 streamCurrent := func(nr int) { 3670 checkFor(t, 10*time.Second, 100*time.Millisecond, func() error { 3671 si, err := js.StreamInfo("TEST", nats.MaxWait(time.Second)) 3672 if err != nil { 3673 return fmt.Errorf("Could not fetch stream info: %v", err) 3674 } 3675 // We should not see the old server at all. 3676 for _, p := range si.Cluster.Replicas { 3677 if p.Name == toRemove { 3678 return fmt.Errorf("Peer not removed yet: %+v", toRemove) 3679 } 3680 if !p.Current { 3681 return fmt.Errorf("Expected replica to be current: %+v", p) 3682 } 3683 } 3684 if len(si.Cluster.Replicas) != nr { 3685 return fmt.Errorf("Expected %d replicas, got %d", nr, len(si.Cluster.Replicas)) 3686 } 3687 return nil 3688 }) 3689 } 3690 3691 // Make sure the peer was removed from the stream and that we did not fill the new spot. 
3692 streamCurrent(1) 3693 3694 // Now add in a new server and make sure it gets added to our stream. 3695 c.addInNewServer() 3696 c.waitOnPeerCount(3) 3697 3698 streamCurrent(2) 3699 } 3700 3701 func TestJetStreamClusterPeerExclusionTag(t *testing.T) { 3702 c := createJetStreamClusterWithTemplateAndModHook(t, jsClusterTempl, "C", 3, 3703 func(serverName, clusterName, storeDir, conf string) string { 3704 switch serverName { 3705 case "S-1": 3706 return fmt.Sprintf("%s\nserver_tags: [server:%s, intersect, %s]", conf, serverName, jsExcludePlacement) 3707 case "S-2": 3708 return fmt.Sprintf("%s\nserver_tags: [server:%s, intersect]", conf, serverName) 3709 default: 3710 return fmt.Sprintf("%s\nserver_tags: [server:%s]", conf, serverName) 3711 } 3712 }) 3713 defer c.shutdown() 3714 3715 nc, js := jsClientConnect(t, c.randomServer()) 3716 defer nc.Close() 3717 3718 for i, c := range []nats.StreamConfig{ 3719 {Replicas: 1, Placement: &nats.Placement{Tags: []string{"server:S-1"}}}, 3720 {Replicas: 2, Placement: &nats.Placement{Tags: []string{"intersect"}}}, 3721 {Replicas: 3}, // not enough server without !jetstream 3722 } { 3723 c.Name = fmt.Sprintf("TEST%d", i) 3724 c.Subjects = []string{c.Name} 3725 _, err := js.AddStream(&c) 3726 require_Error(t, err) 3727 require_Contains(t, err.Error(), "no suitable peers for placement", "exclude tag set") 3728 } 3729 3730 // Test update failure 3731 cfg := &nats.StreamConfig{Name: "TEST", Subjects: []string{"foo"}, Replicas: 2} 3732 _, err := js.AddStream(cfg) 3733 require_NoError(t, err) 3734 3735 cfg.Replicas = 3 3736 _, err = js.UpdateStream(cfg) 3737 require_Error(t, err) 3738 require_Contains(t, err.Error(), "no suitable peers for placement", "exclude tag set") 3739 // Test tag reload removing !jetstream tag, and allowing placement again 3740 3741 srv := c.serverByName("S-1") 3742 3743 v, err := srv.Varz(nil) 3744 require_NoError(t, err) 3745 require_True(t, v.Tags.Contains(jsExcludePlacement)) 3746 content, err := 
os.ReadFile(srv.configFile) 3747 require_NoError(t, err) 3748 newContent := strings.ReplaceAll(string(content), fmt.Sprintf(", %s]", jsExcludePlacement), "]") 3749 changeCurrentConfigContentWithNewContent(t, srv.configFile, []byte(newContent)) 3750 3751 ncSys := natsConnect(t, c.randomServer().ClientURL(), nats.UserInfo("admin", "s3cr3t!")) 3752 defer ncSys.Close() 3753 sub, err := ncSys.SubscribeSync(fmt.Sprintf("$SYS.SERVER.%s.STATSZ", srv.ID())) 3754 require_NoError(t, err) 3755 3756 require_NoError(t, srv.Reload()) 3757 v, err = srv.Varz(nil) 3758 require_NoError(t, err) 3759 require_True(t, !v.Tags.Contains(jsExcludePlacement)) 3760 3761 // it is possible that sub already received a stasz message prior to reload, retry once 3762 cmp := false 3763 for i := 0; i < 2 && !cmp; i++ { 3764 m, err := sub.NextMsg(time.Second) 3765 require_NoError(t, err) 3766 cmp = strings.Contains(string(m.Data), `"tags":["server:s-1","intersect"]`) 3767 } 3768 require_True(t, cmp) 3769 3770 cfg.Replicas = 3 3771 _, err = js.UpdateStream(cfg) 3772 require_NoError(t, err) 3773 } 3774 3775 func TestJetStreamClusterAccountPurge(t *testing.T) { 3776 sysKp, syspub := createKey(t) 3777 sysJwt := encodeClaim(t, jwt.NewAccountClaims(syspub), syspub) 3778 sysCreds := newUser(t, sysKp) 3779 accKp, accpub := createKey(t) 3780 accClaim := jwt.NewAccountClaims(accpub) 3781 accClaim.Limits.JetStreamLimits.DiskStorage = 1024 * 1024 * 5 3782 accClaim.Limits.JetStreamLimits.MemoryStorage = 1024 * 1024 * 5 3783 accJwt := encodeClaim(t, accClaim, accpub) 3784 accCreds := newUser(t, accKp) 3785 3786 tmlp := ` 3787 listen: 127.0.0.1:-1 3788 server_name: %s 3789 jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} 3790 leaf { 3791 listen: 127.0.0.1:-1 3792 } 3793 cluster { 3794 name: %s 3795 listen: 127.0.0.1:%d 3796 routes = [%s] 3797 } 3798 ` 3799 c := createJetStreamClusterWithTemplateAndModHook(t, tmlp, "cluster", 3, 3800 func(serverName, clustername, storeDir, conf string) string { 
3801 return conf + fmt.Sprintf(` 3802 operator: %s 3803 system_account: %s 3804 resolver: { 3805 type: full 3806 dir: '%s/jwt' 3807 timeout: "10ms" 3808 }`, ojwt, syspub, storeDir) 3809 }) 3810 defer c.shutdown() 3811 3812 c.waitOnLeader() 3813 3814 updateJwt(t, c.randomServer().ClientURL(), sysCreds, sysJwt, 3) 3815 updateJwt(t, c.randomServer().ClientURL(), sysCreds, accJwt, 3) 3816 3817 c.waitOnAccount(accpub) 3818 3819 createTestData := func(t *testing.T) { 3820 nc, js := jsClientConnect(t, c.randomNonLeader(), nats.UserCredentials(accCreds)) 3821 defer nc.Close() 3822 3823 _, err := js.AddStream(&nats.StreamConfig{ 3824 Name: "TEST1", 3825 Subjects: []string{"foo"}, 3826 Replicas: 3, 3827 }) 3828 require_NoError(t, err) 3829 c.waitOnStreamLeader(accpub, "TEST1") 3830 3831 ci, err := js.AddConsumer("TEST1", 3832 &nats.ConsumerConfig{Durable: "DUR1", 3833 AckPolicy: nats.AckExplicitPolicy}) 3834 require_NoError(t, err) 3835 require_True(t, ci.Config.Replicas == 0) 3836 3837 ci, err = js.AddConsumer("TEST1", 3838 &nats.ConsumerConfig{Durable: "DUR2", 3839 AckPolicy: nats.AckExplicitPolicy, 3840 Replicas: 1}) 3841 require_NoError(t, err) 3842 require_True(t, ci.Config.Replicas == 1) 3843 3844 toSend := uint64(1_000) 3845 for i := uint64(0); i < toSend; i++ { 3846 _, err = js.Publish("foo", nil) 3847 require_NoError(t, err) 3848 } 3849 3850 _, err = js.AddStream(&nats.StreamConfig{ 3851 Name: "TEST2", 3852 Subjects: []string{"bar"}, 3853 Replicas: 1, 3854 }) 3855 require_NoError(t, err) 3856 3857 ci, err = js.AddConsumer("TEST2", 3858 &nats.ConsumerConfig{Durable: "DUR1", 3859 AckPolicy: nats.AckExplicitPolicy, 3860 Replicas: 0}) 3861 require_NoError(t, err) 3862 require_True(t, ci.Config.Replicas == 0) 3863 3864 for i := uint64(0); i < toSend; i++ { 3865 _, err = js.Publish("bar", nil) 3866 require_NoError(t, err) 3867 } 3868 } 3869 3870 inspectDirs := func(t *testing.T, sysTotal, accTotal int) error { 3871 t.Helper() 3872 sysDirs := 0 3873 accDirs := 0 3874 for 
_, s := range c.servers { 3875 files, err := os.ReadDir(filepath.Join(s.getOpts().StoreDir, "jetstream", syspub, "_js_")) 3876 require_NoError(t, err) 3877 sysDirs += len(files) - 1 // sub 1 for _meta_ 3878 files, err = os.ReadDir(filepath.Join(s.getOpts().StoreDir, "jetstream", accpub, "streams")) 3879 if err == nil || err.(*os.PathError).Error() == "no such file or directory" { 3880 accDirs += len(files) 3881 } 3882 } 3883 if sysDirs != sysTotal || accDirs != accTotal { 3884 return fmt.Errorf("expected directory count does not match %d == %d, %d == %d", 3885 sysDirs, sysTotal, accDirs, accTotal) 3886 } 3887 return nil 3888 } 3889 3890 checkForDirs := func(t *testing.T, sysTotal, accTotal int) { 3891 t.Helper() 3892 checkFor(t, 20*time.Second, 250*time.Millisecond, func() error { 3893 return inspectDirs(t, sysTotal, accTotal) 3894 }) 3895 } 3896 3897 purge := func(t *testing.T) { 3898 t.Helper() 3899 ncsys, err := nats.Connect(c.randomServer().ClientURL(), nats.UserCredentials(sysCreds)) 3900 require_NoError(t, err) 3901 defer ncsys.Close() 3902 3903 request := func() error { 3904 var resp JSApiAccountPurgeResponse 3905 m, err := ncsys.Request(fmt.Sprintf(JSApiAccountPurgeT, accpub), nil, time.Second) 3906 if err != nil { 3907 return err 3908 } 3909 if err := json.Unmarshal(m.Data, &resp); err != nil { 3910 return err 3911 } 3912 if !resp.Initiated { 3913 return fmt.Errorf("not started") 3914 } 3915 return nil 3916 } 3917 checkFor(t, 30*time.Second, 250*time.Millisecond, request) 3918 } 3919 3920 t.Run("startup-cleanup", func(t *testing.T) { 3921 _, newCleanupAcc1 := createKey(t) 3922 _, newCleanupAcc2 := createKey(t) 3923 for _, s := range c.servers { 3924 os.MkdirAll(filepath.Join(s.getOpts().StoreDir, JetStreamStoreDir, newCleanupAcc1, streamsDir), defaultDirPerms) 3925 os.MkdirAll(filepath.Join(s.getOpts().StoreDir, JetStreamStoreDir, newCleanupAcc2), defaultDirPerms) 3926 } 3927 createTestData(t) 3928 checkForDirs(t, 6, 4) 3929 c.stopAll() 3930 c.restartAll() 
3931 for _, s := range c.servers { 3932 accDir := filepath.Join(s.getOpts().StoreDir, JetStreamStoreDir, newCleanupAcc1) 3933 _, e := os.Stat(filepath.Join(accDir, streamsDir)) 3934 require_Error(t, e) 3935 require_True(t, os.IsNotExist(e)) 3936 _, e = os.Stat(accDir) 3937 require_Error(t, e) 3938 require_True(t, os.IsNotExist(e)) 3939 _, e = os.Stat(filepath.Join(s.getOpts().StoreDir, JetStreamStoreDir, newCleanupAcc2)) 3940 require_Error(t, e) 3941 require_True(t, os.IsNotExist(e)) 3942 } 3943 checkForDirs(t, 6, 4) 3944 // Make sure we have a leader for all assets before moving to the next test 3945 c.waitOnStreamLeader(accpub, "TEST1") 3946 c.waitOnConsumerLeader(accpub, "TEST1", "DUR1") 3947 c.waitOnConsumerLeader(accpub, "TEST1", "DUR2") 3948 c.waitOnStreamLeader(accpub, "TEST2") 3949 c.waitOnConsumerLeader(accpub, "TEST2", "DUR1") 3950 }) 3951 3952 t.Run("purge-with-restart", func(t *testing.T) { 3953 createTestData(t) 3954 checkForDirs(t, 6, 4) 3955 purge(t) 3956 checkForDirs(t, 0, 0) 3957 c.stopAll() 3958 c.restartAll() 3959 checkForDirs(t, 0, 0) 3960 }) 3961 3962 t.Run("purge-with-reuse", func(t *testing.T) { 3963 createTestData(t) 3964 checkForDirs(t, 6, 4) 3965 purge(t) 3966 checkForDirs(t, 0, 0) 3967 createTestData(t) 3968 checkForDirs(t, 6, 4) 3969 purge(t) 3970 checkForDirs(t, 0, 0) 3971 }) 3972 3973 t.Run("purge-deleted-account", func(t *testing.T) { 3974 createTestData(t) 3975 checkForDirs(t, 6, 4) 3976 c.stopAll() 3977 for _, s := range c.servers { 3978 require_NoError(t, os.Remove(s.getOpts().StoreDir+"/jwt/"+accpub+".jwt")) 3979 } 3980 c.restartAll() 3981 checkForDirs(t, 6, 4) 3982 purge(t) 3983 checkForDirs(t, 0, 0) 3984 c.stopAll() 3985 c.restartAll() 3986 checkForDirs(t, 0, 0) 3987 }) 3988 } 3989 3990 func TestJetStreamClusterScaleConsumer(t *testing.T) { 3991 c := createJetStreamClusterWithTemplate(t, jsClusterTempl, "C", 3) 3992 defer c.shutdown() 3993 3994 srv := c.randomNonLeader() 3995 nc, js := jsClientConnect(t, srv) 3996 defer 
nc.Close() 3997 3998 si, err := js.AddStream(&nats.StreamConfig{ 3999 Name: "TEST", 4000 Subjects: []string{"foo"}, 4001 Replicas: 3, 4002 }) 4003 require_NoError(t, err) 4004 4005 durCfg := &nats.ConsumerConfig{Durable: "DUR", AckPolicy: nats.AckExplicitPolicy} 4006 ci, err := js.AddConsumer("TEST", durCfg) 4007 require_NoError(t, err) 4008 require_True(t, ci.Config.Replicas == 0) 4009 4010 toSend := uint64(1_000) 4011 for i := uint64(0); i < toSend; i++ { 4012 _, err = js.Publish("foo", nil) 4013 require_NoError(t, err) 4014 } 4015 4016 s, err := js.PullSubscribe("foo", "DUR") 4017 require_NoError(t, err) 4018 4019 consumeOne := func(expSeq uint64) error { 4020 if ci, err := js.ConsumerInfo("TEST", "DUR"); err != nil { 4021 return err 4022 } else if ci.Delivered.Stream != expSeq { 4023 return fmt.Errorf("pre: not expected delivered stream %d, got %d", expSeq, ci.Delivered.Stream) 4024 } else if ci.Delivered.Consumer != expSeq { 4025 return fmt.Errorf("pre: not expected delivered consumer %d, got %d", expSeq, ci.Delivered.Consumer) 4026 } else if ci.AckFloor.Stream != expSeq { 4027 return fmt.Errorf("pre: not expected ack stream %d, got %d", expSeq, ci.AckFloor.Stream) 4028 } else if ci.AckFloor.Consumer != expSeq { 4029 return fmt.Errorf("pre: not expected ack consumer %d, got %d", expSeq, ci.AckFloor.Consumer) 4030 } 4031 if m, err := s.Fetch(1); err != nil { 4032 return err 4033 } else if err := m[0].AckSync(); err != nil { 4034 return err 4035 } 4036 expSeq = expSeq + 1 4037 if ci, err := js.ConsumerInfo("TEST", "DUR"); err != nil { 4038 return err 4039 } else if ci.Delivered.Stream != expSeq { 4040 return fmt.Errorf("post: not expected delivered stream %d, got %d", expSeq, ci.Delivered.Stream) 4041 } else if ci.Delivered.Consumer != expSeq { 4042 return fmt.Errorf("post: not expected delivered consumer %d, got %d", expSeq, ci.Delivered.Consumer) 4043 } else if ci.AckFloor.Stream != expSeq { 4044 return fmt.Errorf("post: not expected ack stream %d, got %d", 
expSeq, ci.AckFloor.Stream) 4045 } else if ci.AckFloor.Consumer != expSeq { 4046 return fmt.Errorf("post: not expected ack consumer %d, got %d", expSeq, ci.AckFloor.Consumer) 4047 } 4048 return nil 4049 } 4050 4051 require_NoError(t, consumeOne(0)) 4052 4053 // scale down, up, down and up to default == 3 again 4054 for i, r := range []int{1, 3, 1, 0} { 4055 durCfg.Replicas = r 4056 if r == 0 { 4057 r = si.Config.Replicas 4058 } 4059 js.UpdateConsumer("TEST", durCfg) 4060 4061 checkFor(t, time.Second*30, time.Millisecond*250, func() error { 4062 if ci, err = js.ConsumerInfo("TEST", "DUR"); err != nil { 4063 return err 4064 } else if ci.Cluster.Leader == _EMPTY_ { 4065 return fmt.Errorf("no leader") 4066 } else if len(ci.Cluster.Replicas) != r-1 { 4067 return fmt.Errorf("not enough replica, got %d wanted %d", len(ci.Cluster.Replicas), r-1) 4068 } else { 4069 for _, r := range ci.Cluster.Replicas { 4070 if !r.Current || r.Offline || r.Lag != 0 { 4071 return fmt.Errorf("replica %s not current %t offline %t lag %d", r.Name, r.Current, r.Offline, r.Lag) 4072 } 4073 } 4074 } 4075 return nil 4076 }) 4077 4078 require_NoError(t, consumeOne(uint64(i+1))) 4079 } 4080 } 4081 4082 func TestJetStreamClusterConsumerScaleUp(t *testing.T) { 4083 c := createJetStreamCluster(t, jsClusterTempl, "HUB", _EMPTY_, 3, 22020, true) 4084 defer c.shutdown() 4085 4086 // Client based API 4087 srv := c.randomNonLeader() 4088 nc, js := jsClientConnect(t, srv) 4089 defer nc.Close() 4090 4091 scfg := nats.StreamConfig{ 4092 Name: "TEST", 4093 Subjects: []string{"foo"}, 4094 Replicas: 1, 4095 } 4096 _, err := js.AddStream(&scfg) 4097 require_NoError(t, err) 4098 defer js.DeleteStream("TEST") 4099 4100 dcfg := nats.ConsumerConfig{ 4101 Durable: "DUR", 4102 AckPolicy: nats.AckExplicitPolicy, 4103 Replicas: 0} 4104 _, err = js.AddConsumer("TEST", &dcfg) 4105 require_NoError(t, err) 4106 4107 for i := 0; i < 100; i++ { 4108 _, err = js.Publish("foo", nil) 4109 require_NoError(t, err) 4110 } 4111 4112 
scfg.Replicas = 2 4113 _, err = js.UpdateStream(&scfg) 4114 require_NoError(t, err) 4115 4116 // The scale up issue shows itself as permanent loss of consumer leadership 4117 // So give it some time for the change to propagate to new consumer peers and the quorum to disrupt 4118 // 2 seconds is a value arrived by experimentally, no sleep or a sleep of 1sec always had the test pass a lot. 4119 time.Sleep(2 * time.Second) 4120 4121 c.waitOnStreamLeader("$G", "TEST") 4122 4123 // There is also a timing component to the issue triggering. 4124 c.waitOnConsumerLeader("$G", "TEST", "DUR") 4125 } 4126 4127 func TestJetStreamClusterPeerOffline(t *testing.T) { 4128 c := createJetStreamClusterExplicit(t, "R5S", 5) 4129 defer c.shutdown() 4130 4131 ml := c.leader() 4132 rs := c.randomNonLeader() 4133 4134 checkPeer := func(ml, rs *Server, shouldBeOffline bool) { 4135 t.Helper() 4136 4137 checkFor(t, 5*time.Second, 50*time.Millisecond, func() error { 4138 var found bool 4139 for _, s := range ml.JetStreamClusterPeers() { 4140 if s == rs.Name() { 4141 found = true 4142 break 4143 } 4144 } 4145 if !shouldBeOffline && !found { 4146 return fmt.Errorf("Server %q not in the peers list", rs.Name()) 4147 } else if shouldBeOffline && found { 4148 return fmt.Errorf("Server %q should not be in the peers list", rs.Name()) 4149 } 4150 4151 var ok bool 4152 ml.nodeToInfo.Range(func(k, v any) bool { 4153 if si := v.(nodeInfo); si.name == rs.Name() { 4154 if shouldBeOffline && si.offline || !shouldBeOffline && !si.offline { 4155 ok = true 4156 return false 4157 } 4158 } 4159 return true 4160 }) 4161 if !ok { 4162 if shouldBeOffline { 4163 return fmt.Errorf("Server %q should be marked as online", rs.Name()) 4164 } 4165 return fmt.Errorf("Server %q is still marked as online", rs.Name()) 4166 } 4167 return nil 4168 }) 4169 } 4170 4171 // Shutdown the server and make sure that it is now showing as offline. 
	rs.Shutdown()
	checkPeer(ml, rs, true)

	// Now restart that server and check that it is no longer offline.
	oldrs := rs
	rs, _ = RunServerWithConfig(rs.getOpts().ConfigFile)
	defer rs.Shutdown()

	// Replaced old with new server
	for i := 0; i < len(c.servers); i++ {
		if c.servers[i] == oldrs {
			c.servers[i] = rs
		}
	}

	// Wait for cluster to be formed
	checkClusterFormed(t, c.servers...)

	// Make sure that we have a leader (there can always be a re-election)
	c.waitOnLeader()
	ml = c.leader()

	// Now check that rs is not offline
	checkPeer(ml, rs, false)
}

// TestJetStreamClusterNoQuorumStepdown verifies that when quorum is lost for
// an R=2 stream (one peer shut down), the stream and consumer leaders step
// down, and that once the remaining non-meta-leader is also shut down the
// whole JS API reports the system as unavailable.
func TestJetStreamClusterNoQuorumStepdown(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	// Setup subscription for leader elected.
	// Must be in place before AddStream so the advisory is not missed.
	lesub, err := nc.SubscribeSync(JSAdvisoryStreamLeaderElectedPre + ".*")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	if _, err := js.AddStream(&nats.StreamConfig{Name: "NO-Q", Replicas: 2}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Make sure we received our leader elected advisory.
	// NOTE(review): NextMsg(0) does not wait; this assumes the advisory has
	// already been delivered by the time AddStream returns — confirm this is
	// guaranteed (AddStream responds only after leader election).
	leadv, _ := lesub.NextMsg(0)
	if leadv == nil {
		t.Fatalf("Expected to receive a leader elected advisory")
	}
	var le JSStreamLeaderElectedAdvisory
	if err := json.Unmarshal(leadv.Data, &le); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Advisory must name the actual current stream leader.
	if ln := c.streamLeader("$G", "NO-Q").Name(); le.Leader != ln {
		t.Fatalf("Expected to have leader %q in elect advisory, got %q", ln, le.Leader)
	}

	payload := []byte("Hello JSC")
	for i := 0; i < 10; i++ {
		if _, err := js.Publish("NO-Q", payload); err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
	}

	// Setup subscription for leader elected.
	clesub, err := nc.SubscribeSync(JSAdvisoryConsumerLeaderElectedPre + ".*.*")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Make durable to have R match Stream.
	sub, err := js.SubscribeSync("NO-Q", nats.Durable("rr"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	ci, err := sub.ConsumerInfo()
	if err != nil || ci == nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Make sure we received our consumer leader elected advisory.
	leadv, _ = clesub.NextMsg(0)
	if leadv == nil {
		t.Fatalf("Expected to receive a consumer leader elected advisory")
	}

	// Shutdown the non-leader.
	c.randomNonStreamLeader("$G", "NO-Q").Shutdown()

	// This should eventually have us stepdown as leader since we would have lost quorum with R=2.
	checkFor(t, 5*time.Second, 500*time.Millisecond, func() error {
		if sl := c.streamLeader("$G", "NO-Q"); sl == nil {
			return nil
		}
		return fmt.Errorf("Still have leader for stream")
	})

	// Matches both the server's "unavailable" error text and a client-side
	// context deadline (request timed out because no one answered).
	notAvailableErr := func(err error) bool {
		return err != nil && (strings.Contains(err.Error(), "unavailable") || err == context.DeadlineExceeded)
	}

	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
		if cl := c.consumerLeader("$G", "NO-Q", ci.Name); cl == nil {
			return nil
		}
		return fmt.Errorf("Still have leader for consumer")
	})

	if _, err = js.ConsumerInfo("NO-Q", ci.Name); !notAvailableErr(err) {
		t.Fatalf("Expected an 'unavailable' error, got %v", err)
	}
	if _, err := sub.ConsumerInfo(); !notAvailableErr(err) {
		t.Fatalf("Expected an 'unavailable' error, got %v", err)
	}

	// Now let's take out the other non meta-leader
	// We should get same error for general API calls.
	c.randomNonLeader().Shutdown()
	c.expectNoLeader()

	// Now make sure the general JS API responds with system unavailable.
	if _, err = js.AccountInfo(); !notAvailableErr(err) {
		t.Fatalf("Expected an 'unavailable' error, got %v", err)
	}
	if _, err := js.AddStream(&nats.StreamConfig{Name: "NO-Q33", Replicas: 2}); !notAvailableErr(err) {
		t.Fatalf("Expected an 'unavailable' error, got %v", err)
	}
	if _, err := js.UpdateStream(&nats.StreamConfig{Name: "NO-Q33", Replicas: 2}); !notAvailableErr(err) {
		t.Fatalf("Expected an 'unavailable' error, got %v", err)
	}
	if err := js.DeleteStream("NO-Q"); !notAvailableErr(err) {
		t.Fatalf("Expected an 'unavailable' error, got %v", err)
	}
	if err := js.PurgeStream("NO-Q"); !notAvailableErr(err) {
		t.Fatalf("Expected an 'unavailable' error, got %v", err)
	}
	if err := js.DeleteMsg("NO-Q", 1); !notAvailableErr(err) {
		t.Fatalf("Expected an 'unavailable' error, got %v", err)
	}
	// Consumer
	if _, err := js.AddConsumer("NO-Q", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy}); !notAvailableErr(err) {
		t.Fatalf("Expected an 'unavailable' error, got %v", err)
	}
	if err := js.DeleteConsumer("NO-Q", "dlc"); !notAvailableErr(err) {
		t.Fatalf("Expected an 'unavailable' error, got %v", err)
	}
	if _, err := js.ConsumerInfo("NO-Q", "dlc"); !notAvailableErr(err) {
		t.Fatalf("Expected an 'unavailable' error, got %v", err)
	}
	// Listers
	// With no leader the lister channels should yield nothing at all.
	for info := range js.StreamsInfo() {
		t.Fatalf("Unexpected stream info, got %v", info)
	}
	for info := range js.ConsumersInfo("NO-Q") {
		t.Fatalf("Unexpected consumer info, got %v", info)
	}
}

// TestJetStreamClusterCreateResponseAdvisoriesHaveSubject checks that every
// JS API audit advisory emitted for stream/consumer lifecycle operations
// carries a non-empty request subject.
func TestJetStreamClusterCreateResponseAdvisoriesHaveSubject(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	sub, err := nc.SubscribeSync("$JS.EVENT.ADVISORY.API")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer sub.Unsubscribe()

	if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 2}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if _, err := js.SubscribeSync("TEST", nats.Durable("DLC")); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if err := js.PurgeStream("TEST"); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if err := js.DeleteStream("TEST"); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// One audit advisory per API call above (create stream, create consumer
	// plus its stream-name lookup, purge, delete, ...).
	checkSubsPending(t, sub, 6)

	for m, err := sub.NextMsg(0); err == nil; m, err = sub.NextMsg(0) {
		var audit JSAPIAudit
		if err := json.Unmarshal(m.Data, &audit); err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		if audit.Subject == _EMPTY_ {
			t.Fatalf("Expected subject, got nothing")
		}
	}
}

// TestJetStreamClusterRestartAndRemoveAdvisories verifies that restarting
// cluster nodes does not replay CREATED/UPDATED advisories, and that delete
// operations still produce exactly the expected DELETED advisories.
func TestJetStreamClusterRestartAndRemoveAdvisories(t *testing.T) {
	// FIXME(dlc) - Flaky on Travis, skip for now.
	skip(t)

	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	sub, err := nc.SubscribeSync("$JS.EVENT.ADVISORY.API")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer sub.Unsubscribe()

	csub, err := nc.SubscribeSync("$JS.EVENT.ADVISORY.*.CREATED.>")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer csub.Unsubscribe()
	nc.Flush()

	// sendBatch publishes n messages to the given subject, failing the test
	// on any publish error.
	sendBatch := func(subject string, n int) {
		t.Helper()
		for i := 0; i < n; i++ {
			if _, err := js.Publish(subject, []byte("JSC-OK")); err != nil {
				t.Fatalf("Unexpected publish error: %v", err)
			}
		}
	}

	// Add in some streams with msgs and consumers.
	if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST-1", Replicas: 2}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if _, err := js.SubscribeSync("TEST-1", nats.Durable("DC")); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	sendBatch("TEST-1", 25)

	if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST-2", Replicas: 2}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if _, err := js.SubscribeSync("TEST-2", nats.Durable("DC")); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	sendBatch("TEST-2", 50)

	if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST-3", Replicas: 3, Storage: nats.MemoryStorage}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if _, err := js.SubscribeSync("TEST-3", nats.Durable("DC")); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	sendBatch("TEST-3", 100)

	// drainSub discards every message currently pending on a subscription.
	drainSub := func(sub *nats.Subscription) {
		for _, err := sub.NextMsg(0); err == nil; _, err = sub.NextMsg(0) {
		}
	}

	// Wait for the advisories for all streams and consumers.
	checkSubsPending(t, sub, 12) // 3 streams, 3*2 consumers, 3 stream names lookups for creating consumers.
	drainSub(sub)

	// Created audit events.
	checkSubsPending(t, csub, 6)
	drainSub(csub)

	usub, err := nc.SubscribeSync("$JS.EVENT.ADVISORY.*.UPDATED.>")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer usub.Unsubscribe()
	nc.Flush()

	// Baseline: everything drained, nothing new yet.
	checkSubsPending(t, csub, 0)
	checkSubsPending(t, sub, 0)
	checkSubsPending(t, usub, 0)

	// Now restart the other two servers we are not connected to.
	for _, cs := range c.servers {
		if cs != s {
			cs.Shutdown()
			c.restartServer(cs)
		}
	}
	c.waitOnAllCurrent()

	// A restart must not replay any CREATED/UPDATED/API advisories.
	checkSubsPending(t, csub, 0)
	checkSubsPending(t, sub, 0)
	checkSubsPending(t, usub, 0)

	dsub, err := nc.SubscribeSync("$JS.EVENT.ADVISORY.*.DELETED.>")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer dsub.Unsubscribe()
	nc.Flush()

	c.waitOnConsumerLeader("$G", "TEST-1", "DC")
	c.waitOnLeader()

	// Now check delete advisories as well.
	if err := js.DeleteConsumer("TEST-1", "DC"); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	checkSubsPending(t, csub, 0)
	checkSubsPending(t, dsub, 1)
	checkSubsPending(t, sub, 1)
	checkSubsPending(t, usub, 0)
	drainSub(dsub)

	if err := js.DeleteStream("TEST-3"); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	checkSubsPending(t, dsub, 2) // Stream and the consumer underneath.
	checkSubsPending(t, sub, 2)
}

// TestJetStreamClusterNoDuplicateOnNodeRestart makes sure that after the
// stream leader restarts, a durable consumer resumes delivery without
// redelivering already-acked messages.
func TestJetStreamClusterNoDuplicateOnNodeRestart(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "ND", 2)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"foo"},
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	sl := c.streamLeader("$G", "TEST")
	if s == sl {
		// NOTE(review): this reconnects to the same server s (the stream
		// leader), so the reconnect is a no-op; presumably the intent was to
		// connect to a different, non-leader server — confirm against intent
		// before changing.
		nc.Close()
		nc, js = jsClientConnect(t, s)
		defer nc.Close()
	}

	sub, err := js.SubscribeSync("foo", nats.Durable("dlc"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	js.Publish("foo", []byte("msg1"))
	if m, err := sub.NextMsg(time.Second); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	} else {
		// Synchronous ack so the ack is processed before the leader restart.
		m.AckSync()
	}

	sl.Shutdown()
	c.restartServer(sl)
	c.waitOnStreamLeader("$G", "TEST")
	c.waitOnConsumerLeader("$G", "TEST", "dlc")

	// Send second msg
	js.Publish("foo", []byte("msg2"))
	msg, err := sub.NextMsg(5 * time.Second)
	if err != nil {
		t.Fatalf("Error getting message: %v", err)
	}
	if string(msg.Data) != "msg2" {
		t.Fatalf("Unexpected message: %s", msg.Data)
	}
	msg.AckSync()

	// Make sure we don't get a duplicate.
	msg, err = sub.NextMsg(250 * time.Millisecond)
	if err == nil {
		t.Fatalf("Should have gotten an error, got %s", msg.Data)
	}
}

// TestJetStreamClusterNoDupePeerSelection checks that R=3 placement never
// puts two raft nodes of the same group on one server (leader never appears
// among its own replicas), for streams and their consumers.
func TestJetStreamClusterNoDupePeerSelection(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "NDP", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	// Create 10 streams. Make sure none of them have a replica
	// that is the same as the leader.
	for i := 1; i <= 10; i++ {
		si, err := js.AddStream(&nats.StreamConfig{
			Name:     fmt.Sprintf("TEST-%d", i),
			Replicas: 3,
		})
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		// R=3 means a leader plus exactly 2 replicas.
		if si.Cluster == nil || si.Cluster.Leader == "" || len(si.Cluster.Replicas) != 2 {
			t.Fatalf("Unexpected cluster state for stream info: %+v\n", si.Cluster)
		}
		// Make sure that the replicas are not same as the leader.
		for _, pi := range si.Cluster.Replicas {
			if pi.Name == si.Cluster.Leader {
				t.Fatalf("Found replica that is same as leader, meaning 2 nodes placed on same server")
			}
		}
		// Now do a consumer and check same thing.
		sub, err := js.SubscribeSync(si.Config.Name)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		ci, err := sub.ConsumerInfo()
		if err != nil {
			t.Fatalf("Unexpected error getting consumer info: %v", err)
		}
		for _, pi := range ci.Cluster.Replicas {
			if pi.Name == ci.Cluster.Leader {
				t.Fatalf("Found replica that is same as leader, meaning 2 nodes placed on same server")
			}
		}
	}
}

// TestJetStreamClusterStreamRemovePeer exercises the stream remove-peer API:
// rejects an unknown peer, removes a real peer, and verifies that stream and
// durable consumer groups heal onto new peers while an ephemeral (R=1) whose
// only peer was removed is deleted outright. Also checks that an in-flight
// pull fetch does not see a spurious `Consumer Deleted` error.
func TestJetStreamClusterStreamRemovePeer(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "RNS", 5)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 3})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Send in 10 messages.
	msg, toSend := []byte("Hello JS Clustering"), 10
	for i := 0; i < toSend; i++ {
		if _, err = js.Publish("TEST", msg); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}

	sub, err := js.SubscribeSync("TEST", nats.Durable("cat"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	checkSubsPending(t, sub, toSend)

	// Do ephemeral too.
	esub, err := js.SubscribeSync("TEST")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	checkSubsPending(t, esub, toSend)

	_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "pull", AckPolicy: nats.AckNonePolicy})
	require_NoError(t, err)

	pullSub, err := js.PullSubscribe("TEST", "pull")
	require_NoError(t, err)

	// First fetch the messages that are already there.
	msgs, err := pullSub.Fetch(toSend, nats.MaxWait(500*time.Millisecond))
	require_NoError(t, err)
	require_Equal(t, toSend, len(msgs))

	// Now prepare a check to see if we get unwanted `Consumer Deleted` error on peer remove.
	pullResults := make(chan error, 1)
	go func() {
		_, err := pullSub.Fetch(1, nats.MaxWait(30*time.Second))
		// Let's check if we get unwanted `Consumer Deleted` error on peer remove.
		// Everything else is fine (Leader Changed, Timeout, etc.)
		if err != nats.ErrConsumerDeleted {
			close(pullResults)
		} else {
			pullResults <- err
		}
	}()

	ci, err := esub.ConsumerInfo()
	if err != nil {
		t.Fatalf("Could not fetch consumer info: %v", err)
	}
	// Capture ephemeral's server and name.
	es, en := ci.Cluster.Leader, ci.Name

	// Grab stream info.
	si, err := js.StreamInfo("TEST")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	peers := []string{si.Cluster.Leader}
	for _, p := range si.Cluster.Replicas {
		peers = append(peers, p.Name)
	}
	// Pick a truly random server to remove.
	rand.Shuffle(len(peers), func(i, j int) { peers[i], peers[j] = peers[j], peers[i] })
	toRemove := peers[0]

	// First test bad peer.
	req := &JSApiStreamRemovePeerRequest{Peer: "NOT VALID"}
	jsreq, err := json.Marshal(req)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Need to call this by hand for now.
	resp, err := nc.Request(fmt.Sprintf(JSApiStreamRemovePeerT, "TEST"), jsreq, time.Second)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	var rpResp JSApiStreamRemovePeerResponse
	if err := json.Unmarshal(resp.Data, &rpResp); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if rpResp.Error == nil || !strings.Contains(rpResp.Error.Description, "peer not a member") {
		t.Fatalf("Expected error for bad peer, got %+v", rpResp.Error)
	}
	// Reset before reusing the response struct below.
	rpResp.Error = nil

	req = &JSApiStreamRemovePeerRequest{Peer: toRemove}
	jsreq, err = json.Marshal(req)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	resp, err = nc.Request(fmt.Sprintf(JSApiStreamRemovePeerT, "TEST"), jsreq, time.Second)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if err := json.Unmarshal(resp.Data, &rpResp); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if rpResp.Error != nil {
		t.Fatalf("Unexpected error: %+v", rpResp.Error)
	}

	c.waitOnStreamLeader("$G", "TEST")

	// Stream group should heal: still R=3 (leader + 2 current replicas)
	// and the removed peer gone from the group.
	checkFor(t, 10*time.Second, 100*time.Millisecond, func() error {
		si, err := js.StreamInfo("TEST", nats.MaxWait(time.Second))
		if err != nil {
			return fmt.Errorf("Could not fetch stream info: %v", err)
		}
		if len(si.Cluster.Replicas) != 2 {
			return fmt.Errorf("Expected 2 replicas, got %d", len(si.Cluster.Replicas))
		}
		for _, peer := range si.Cluster.Replicas {
			if !peer.Current {
				return fmt.Errorf("Expected replica to be current: %+v", peer)
			}
		}
		if si.Cluster.Leader == toRemove {
			return fmt.Errorf("Peer not removed yet: %+v", toRemove)
		}
		for _, p := range si.Cluster.Replicas {
			if p.Name == toRemove {
				return fmt.Errorf("Peer not removed yet: %+v", toRemove)
			}
		}
		return nil
	})

	c.waitOnConsumerLeader("$G", "TEST", "cat")
	c.waitOnConsumerLeader("$G", "TEST", "pull")

	// Now check consumer info as well.
	checkFor(t, 10*time.Second, 100*time.Millisecond, func() error {
		ci, err := js.ConsumerInfo("TEST", "cat", nats.MaxWait(time.Second))
		if err != nil {
			return fmt.Errorf("Could not fetch consumer info: %v", err)
		}
		if len(ci.Cluster.Replicas) != 2 {
			return fmt.Errorf("Expected 2 replicas, got %d", len(ci.Cluster.Replicas))
		}
		for _, peer := range ci.Cluster.Replicas {
			if !peer.Current {
				return fmt.Errorf("Expected replica to be current: %+v", peer)
			}
		}
		if ci.Cluster.Leader == toRemove {
			return fmt.Errorf("Peer not removed yet: %+v", toRemove)
		}
		for _, p := range ci.Cluster.Replicas {
			if p.Name == toRemove {
				return fmt.Errorf("Peer not removed yet: %+v", toRemove)
			}
		}
		return nil
	})

	// Check if we got the `Consumer Deleted` error on the pull consumer.
	// A closed channel (fetch ended with any other error) yields nil here.
	select {
	case err := <-pullResults:
		if err != nil {
			t.Fatalf("Expected timeout error or nil, got %v", err)
		}
	default:
	}

	// Now check ephemeral consumer info.
	// Make sure we did not stamp same new group into the ephemeral where R=1.
	ci, err = esub.ConsumerInfo()
	// If the leader was same as what we just removed, this should fail.
	if es == toRemove {
		if err != nats.ErrConsumerNotFound {
			t.Fatalf("Expected a not found error, got %v", err)
		}
		// Also make sure this was removed all together.
		// We may proactively move things in the future.
		for cn := range js.ConsumerNames("TEST") {
			if cn == en {
				t.Fatalf("Expected ephemeral consumer to be deleted since we removed its only peer")
			}
		}
	} else {
		if err != nil {
			t.Fatalf("Could not fetch consumer info: %v", err)
		}
		if len(ci.Cluster.Replicas) != 0 {
			t.Fatalf("Expected no replicas for ephemeral, got %d", len(ci.Cluster.Replicas))
		}
	}
}

// TestJetStreamClusterStreamLeaderStepDown drives the stream and consumer
// leader step-down APIs and verifies a new, different leader is elected with
// all replicas current.
func TestJetStreamClusterStreamLeaderStepDown(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "RNS", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 3})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Send in 10 messages.
	msg, toSend := []byte("Hello JS Clustering"), 10
	for i := 0; i < toSend; i++ {
		if _, err = js.Publish("TEST", msg); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}

	sub, err := js.SubscribeSync("TEST", nats.Durable("cat"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer sub.Unsubscribe()

	oldLeader := c.streamLeader("$G", "TEST").Name()

	// Need to call this by hand for now.
	resp, err := nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "TEST"), nil, time.Second)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	var sdResp JSApiStreamLeaderStepDownResponse
	if err := json.Unmarshal(resp.Data, &sdResp); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if sdResp.Error != nil {
		t.Fatalf("Unexpected error: %+v", sdResp.Error)
	}

	// Grab shorter timeout jetstream context.
	// Keeps the polling loop below responsive while a leader may be missing.
	js, err = nc.JetStream(nats.MaxWait(250 * time.Millisecond))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	checkFor(t, 2*time.Second, 50*time.Millisecond, func() error {
		si, err := js.StreamInfo("TEST")
		if err != nil {
			return fmt.Errorf("Could not fetch stream info: %v", err)
		}
		if si.Cluster.Leader == oldLeader {
			return fmt.Errorf("Still have old leader")
		}
		if len(si.Cluster.Replicas) != 2 {
			return fmt.Errorf("Expected 2 replicas, got %d", len(si.Cluster.Replicas))
		}
		for _, peer := range si.Cluster.Replicas {
			if !peer.Current {
				return fmt.Errorf("Expected replica to be current: %+v", peer)
			}
		}
		return nil
	})

	// Now do consumer.
	oldLeader = c.consumerLeader("$G", "TEST", "cat").Name()

	// Need to call this by hand for now.
	resp, err = nc.Request(fmt.Sprintf(JSApiConsumerLeaderStepDownT, "TEST", "cat"), nil, time.Second)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	var cdResp JSApiConsumerLeaderStepDownResponse
	if err := json.Unmarshal(resp.Data, &cdResp); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if cdResp.Error != nil {
		t.Fatalf("Unexpected error: %+v", cdResp.Error)
	}

	checkFor(t, 2*time.Second, 50*time.Millisecond, func() error {
		ci, err := js.ConsumerInfo("TEST", "cat")
		if err != nil {
			return fmt.Errorf("Could not fetch consumer info: %v", err)
		}
		if ci.Cluster.Leader == oldLeader {
			return fmt.Errorf("Still have old leader")
		}
		if len(ci.Cluster.Replicas) != 2 {
			return fmt.Errorf("Expected 2 replicas, got %d", len(ci.Cluster.Replicas))
		}
		for _, peer := range ci.Cluster.Replicas {
			if !peer.Current {
				return fmt.Errorf("Expected replica to be current: %+v", peer)
			}
		}
		return nil
	})
}

// TestJetStreamClusterRemoveServer removes the JetStream-enabled server that
// leads a stream and verifies stream and consumer groups recover with a full
// complement of current replicas. Currently skipped.
func TestJetStreamClusterRemoveServer(t *testing.T) {
	skip(t)

	c := createJetStreamClusterExplicit(t, "RNS", 5)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 3})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Send in 10 messages.
	msg, toSend := []byte("Hello JS Clustering"), 10
	for i := 0; i < toSend; i++ {
		if _, err = js.Publish("TEST", msg); err != nil {
			t.Fatalf("Unexpected publish error: %v", err)
		}
	}
	sub, err := js.SubscribeSync("TEST")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	checkSubsPending(t, sub, toSend)
	ci, err := sub.ConsumerInfo()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Remember the (server-generated) ephemeral consumer name.
	cname := ci.Name

	sl := c.streamLeader("$G", "TEST")
	c.removeJetStream(sl)

	c.waitOnLeader()
	c.waitOnStreamLeader("$G", "TEST")

	// Faster timeout since we loop below checking for condition.
	js, err = nc.JetStream(nats.MaxWait(250 * time.Millisecond))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Check the stream info is eventually correct.
	checkFor(t, 20*time.Second, 100*time.Millisecond, func() error {
		si, err := js.StreamInfo("TEST")
		if err != nil {
			return fmt.Errorf("Could not fetch stream info: %v", err)
		}
		if len(si.Cluster.Replicas) != 2 {
			return fmt.Errorf("Expected 2 replicas, got %d", len(si.Cluster.Replicas))
		}
		for _, peer := range si.Cluster.Replicas {
			if !peer.Current {
				return fmt.Errorf("Expected replica to be current: %+v", peer)
			}
		}
		return nil
	})

	// Now do consumer.
	c.waitOnConsumerLeader("$G", "TEST", cname)
	checkFor(t, 20*time.Second, 50*time.Millisecond, func() error {
		ci, err := js.ConsumerInfo("TEST", cname)
		if err != nil {
			return fmt.Errorf("Could not fetch consumer info: %v", err)
		}
		if len(ci.Cluster.Replicas) != 2 {
			return fmt.Errorf("Expected 2 replicas, got %d", len(ci.Cluster.Replicas))
		}
		for _, peer := range ci.Cluster.Replicas {
			if !peer.Current {
				return fmt.Errorf("Expected replica to be current: %+v", peer)
			}
		}
		return nil
	})
}

// TestJetStreamClusterPurgeReplayAfterRestart ensures that replaying the raft
// log after a full cluster restart re-applies a purge, so only the messages
// published after the purge remain.
func TestJetStreamClusterPurgeReplayAfterRestart(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "P3F", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomNonLeader()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 3}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// sendBatch publishes n messages to the TEST stream.
	sendBatch := func(n int) {
		t.Helper()
		// Send a batch to a given subject.
5004 for i := 0; i < n; i++ { 5005 if _, err := js.Publish("TEST", []byte("OK")); err != nil { 5006 t.Fatalf("Unexpected publish error: %v", err) 5007 } 5008 } 5009 } 5010 5011 sendBatch(10) 5012 if err := js.PurgeStream("TEST"); err != nil { 5013 t.Fatalf("Unexpected purge error: %v", err) 5014 } 5015 sendBatch(10) 5016 5017 c.stopAll() 5018 c.restartAll() 5019 5020 c.waitOnStreamLeader("$G", "TEST") 5021 5022 s = c.randomServer() 5023 nc, js = jsClientConnect(t, s) 5024 defer nc.Close() 5025 5026 si, err := js.StreamInfo("TEST") 5027 if err != nil { 5028 t.Fatalf("Unexpected error: %v", err) 5029 } 5030 if si.State.Msgs != 10 { 5031 t.Fatalf("Expected 10 msgs after restart, got %d", si.State.Msgs) 5032 } 5033 } 5034 5035 func TestJetStreamClusterStreamGetMsg(t *testing.T) { 5036 c := createJetStreamClusterExplicit(t, "R3F", 3) 5037 defer c.shutdown() 5038 5039 // Client based API 5040 s := c.randomServer() 5041 nc, js := jsClientConnect(t, s) 5042 defer nc.Close() 5043 5044 if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST"}); err != nil { 5045 t.Fatalf("Unexpected error: %v", err) 5046 } 5047 if _, err := js.Publish("TEST", []byte("OK")); err != nil { 5048 t.Fatalf("Unexpected publish error: %v", err) 5049 } 5050 5051 mreq := &JSApiMsgGetRequest{Seq: 1} 5052 req, err := json.Marshal(mreq) 5053 if err != nil { 5054 t.Fatalf("Unexpected error: %v", err) 5055 } 5056 rmsg, err := nc.Request(fmt.Sprintf(JSApiMsgGetT, "TEST"), req, time.Second) 5057 if err != nil { 5058 t.Fatalf("Could not retrieve stream message: %v", err) 5059 } 5060 if err != nil { 5061 t.Fatalf("Could not retrieve stream message: %v", err) 5062 } 5063 5064 var resp JSApiMsgGetResponse 5065 err = json.Unmarshal(rmsg.Data, &resp) 5066 if err != nil { 5067 t.Fatalf("Could not parse stream message: %v", err) 5068 } 5069 if resp.Message == nil || resp.Error != nil { 5070 t.Fatalf("Did not receive correct response: %+v", resp.Error) 5071 } 5072 } 5073 5074 func 
TestJetStreamClusterStreamDirectGetMsg(t *testing.T) {
	// Verifies the AllowDirect fast-path msg get ($JS.API.DIRECT.GET):
	// payload and JS headers (stream, sequence, subject, timestamp) must be
	// set on the raw reply message.
	c := createJetStreamClusterExplicit(t, "R3F", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, _ := jsClientConnect(t, s)
	defer nc.Close()

	// Do by hand for now.
	cfg := &StreamConfig{
		Name:        "TEST",
		Subjects:    []string{"foo"},
		Storage:     MemoryStorage,
		Replicas:    3,
		MaxMsgsPer:  1,
		AllowDirect: true,
	}
	addStream(t, nc, cfg)
	sendStreamMsg(t, nc, "foo", "bar")

	getSubj := fmt.Sprintf(JSDirectMsgGetT, "TEST")
	// getMsg issues a direct-get request (nil req means empty body) and
	// returns the raw reply.
	getMsg := func(req *JSApiMsgGetRequest) *nats.Msg {
		var b []byte
		var err error
		if req != nil {
			b, err = json.Marshal(req)
			require_NoError(t, err)
		}
		m, err := nc.Request(getSubj, b, time.Second)
		require_NoError(t, err)
		return m
	}

	m := getMsg(&JSApiMsgGetRequest{LastFor: "foo"})
	require_True(t, string(m.Data) == "bar")
	require_True(t, m.Header.Get(JSStream) == "TEST")
	require_True(t, m.Header.Get(JSSequence) == "1")
	require_True(t, m.Header.Get(JSSubject) == "foo")
	// Reply arrives on the request inbox, not the stored subject.
	require_True(t, m.Subject != "foo")
	require_True(t, m.Header.Get(JSTimeStamp) != _EMPTY_)
}

// TestJetStreamClusterStreamPerf is a manual publish-throughput benchmark
// placeholder (R=3, multiple producers); skipped by default.
func TestJetStreamClusterStreamPerf(t *testing.T) {
	// Comment out to run, holding place for now.
	skip(t)

	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"foo"},
		Replicas: 3,
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	numConnections := 4
	var conns []nats.JetStream
	for i := 0; i < numConnections; i++ {
		s := c.randomServer()
		nc, js := jsClientConnect(t, s)
		defer nc.Close()
		conns = append(conns, js)
	}

	toSend := 100000
	numProducers := 8

	payload := []byte("Hello JSC")

	startCh := make(chan bool)
	var wg sync.WaitGroup

	for n := 0; n < numProducers; n++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			js := conns[rand.Intn(numConnections)]
			// All producers block here until startCh is closed, so the timer
			// below measures publishing only.
			<-startCh
			for i := 0; i < int(toSend)/numProducers; i++ {
				if _, err = js.Publish("foo", payload); err != nil {
					t.Errorf("Unexpected publish error: %v", err)
				}
			}
		}()
	}

	// Wait for Go routines.
	time.Sleep(250 * time.Millisecond)

	start := time.Now()
	close(startCh)
	wg.Wait()

	tt := time.Since(start)
	fmt.Printf("Took %v to send %d msgs with %d producers and R=3!\n", tt, toSend, numProducers)
	fmt.Printf("%.0f msgs/sec\n\n", float64(toSend)/tt.Seconds())
}

// TestJetStreamClusterConsumerPerf is a manual delivery-throughput benchmark
// placeholder for a push consumer; skipped by default.
func TestJetStreamClusterConsumerPerf(t *testing.T) {
	// Comment out to run, holding place for now.
	skip(t)

	c := createJetStreamClusterExplicit(t, "R3S", 3)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 3})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	toSend := 500000
	msg := make([]byte, 64)
	crand.Read(msg)

	for i := 0; i < toSend; i++ {
		nc.Publish("TEST", msg)
	}
	nc.Flush()

	checkFor(t, 10*time.Second, 250*time.Millisecond, func() error {
		si, err := js.StreamInfo("TEST")
		if err != nil {
			return fmt.Errorf("Unexpected error: %v", err)
		}
		if si.State.Msgs != uint64(toSend) {
			return fmt.Errorf("Expected to have %d messages, got %d", toSend, si.State.Msgs)
		}
		return nil
	})

	received := int32(0)
	deliverTo := "r"
	done := make(chan bool)
	total := int32(toSend)
	var start time.Time

	nc.Subscribe(deliverTo, func(m *nats.Msg) {
		// Timer starts on the first delivery, stops when all are received.
		if r := atomic.AddInt32(&received, 1); r >= total {
			done <- true
		} else if r == 1 {
			start = time.Now()
		}
	})

	_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{DeliverSubject: deliverTo, Durable: "gf"})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	select {
	case <-done:
	case <-time.After(10 * time.Second):
		t.Fatalf("Timed out?")
	}
	tt := time.Since(start)
	fmt.Printf("Took %v to receive %d msgs\n", tt, toSend)
	fmt.Printf("%.0f msgs/sec\n\n", float64(toSend)/tt.Seconds())
}

// This test creates a queue consumer for the delivery subject,
// and makes sure it connects to the server that is not the leader
// of the stream. A bug was not stripping the $JS.ACK reply subject
// correctly, which means that ack sent on the reply subject was
// dropped by the route.
// TestJetStreamClusterQueueSubConsumer creates a push consumer with a deliver
// group and connects the queue subscriber to a server that is NOT the
// consumer leader. A past bug failed to strip the $JS.ACK reply subject when
// the delivery crossed a route, so the ack sent on the reply subject was
// dropped and the message was redelivered.
func TestJetStreamClusterQueueSubConsumer(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "R2S", 2)
	defer c.shutdown()

	// Client based API
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	// R1 stream so it lives on exactly one of the two servers.
	_, err := js.AddStream(&nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"foo.>"},
		Replicas: 1,
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Build the consumer via the raw API so we can set DeliverGroup.
	inbox := nats.NewInbox()
	obsReq := CreateConsumerRequest{
		Stream: "TEST",
		Config: ConsumerConfig{
			Durable:        "ivan",
			DeliverSubject: inbox,
			DeliverGroup:   "queue",
			AckPolicy:      AckExplicit,
			// Short ack wait so a dropped ack shows up quickly as a redelivery.
			AckWait: 100 * time.Millisecond,
		},
	}
	req, err := json.Marshal(obsReq)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	resp, err := nc.Request(fmt.Sprintf(JSApiDurableCreateT, "TEST", "ivan"), req, time.Second)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	var ccResp JSApiConsumerCreateResponse
	if err = json.Unmarshal(resp.Data, &ccResp); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if ccResp.Error != nil {
		t.Fatalf("Unexpected error, got %+v", ccResp.Error)
	}

	// Consumer info tells us which server is the leader.
	ci, err := js.ConsumerInfo("TEST", "ivan")
	if err != nil {
		t.Fatalf("Error getting consumer info: %v", err)
	}

	// Now create a client that does NOT connect to the stream leader.
	// Start with url from first server in the cluster.
	u := c.servers[0].ClientURL()
	// If leader is "S-1", then use S-2 to connect to, which is at servers[1].
	if ci.Cluster.Leader == "S-1" {
		u = c.servers[1].ClientURL()
	}
	qsubnc, err := nats.Connect(u)
	if err != nil {
		t.Fatalf("Error connecting: %v", err)
	}
	defer qsubnc.Close()

	// Buffered to 2 so an unexpected redelivery can be observed without blocking.
	ch := make(chan struct{}, 2)
	if _, err := qsubnc.QueueSubscribe(inbox, "queue", func(m *nats.Msg) {
		// Ack by responding on the (route-crossed) reply subject.
		m.Respond(nil)
		ch <- struct{}{}
	}); err != nil {
		t.Fatalf("Error creating sub: %v", err)
	}

	// Use the other connection to publish a message
	if _, err := js.Publish("foo.bar", []byte("hello")); err != nil {
		t.Fatalf("Error on publish: %v", err)
	}

	// Wait that we receive the message first.
	select {
	case <-ch:
	case <-time.After(time.Second):
		t.Fatal("Did not receive message")
	}

	// Message should be ack'ed and not redelivered. Waiting past the 100ms
	// AckWait proves the ack made it back across the route.
	select {
	case <-ch:
		t.Fatal("Message redelivered!!!")
	case <-time.After(250 * time.Millisecond):
		// OK
	}
}
5350 s := c.randomNonLeader() 5351 nc, err := nats.Connect(s.ClientURL(), nats.UserInfo("admin", "s3cr3t!")) 5352 if err != nil { 5353 t.Fatalf("Failed to create system client: %v", err) 5354 } 5355 defer nc.Close() 5356 5357 resp, err := nc.Request(JSApiLeaderStepDown, nil, time.Second) 5358 if err != nil { 5359 t.Fatalf("Error on stepdown request: %v", err) 5360 } 5361 var sdr JSApiLeaderStepDownResponse 5362 if err := json.Unmarshal(resp.Data, &sdr); err != nil { 5363 t.Fatalf("Unexpected error: %v", err) 5364 } 5365 if sdr.Error != nil || !sdr.Success { 5366 t.Fatalf("Unexpected error for leader stepdown: %+v", sdr.Error) 5367 } 5368 5369 c.waitOnLeader() 5370 if cl == c.leader() { 5371 t.Fatalf("Expected a new metaleader, got same") 5372 } 5373 } 5374 5375 func TestJetStreamClusterSourcesFilteringAndUpdating(t *testing.T) { 5376 c := createJetStreamClusterExplicit(t, "MSR", 5) 5377 defer c.shutdown() 5378 5379 // Client for API requests. 5380 nc, js := jsClientConnect(t, c.randomServer()) 5381 defer nc.Close() 5382 5383 sendBatch := func(subject string, n int) { 5384 t.Helper() 5385 // Send a batch to a given subject. 
// TestJetStreamClusterSourcesFilteringAndUpdating verifies that a sourcing
// stream honors its per-source FilterSubject, that updating the source list
// re-sources from the origin's start for a never-before-seen filter, and that
// the sourcing consumers resume at the right sequence after a full cluster
// restart. It also checks that a purge of the sourcing stream is not undone
// by re-sourcing, even across restarts.
func TestJetStreamClusterSourcesFilteringAndUpdating(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "MSR", 5)
	defer c.shutdown()

	// Client for API requests.
	nc, js := jsClientConnect(t, c.randomServer())
	defer nc.Close()

	// sendBatch publishes n "OK" messages to the given subject.
	sendBatch := func(subject string, n int) {
		t.Helper()
		// Send a batch to a given subject.
		for i := 0; i < n; i++ {
			if _, err := js.Publish(subject, []byte("OK")); err != nil {
				t.Fatalf("Unexpected publish error: %v", err)
			}
		}
	}

	// checkSync polls until TEST and M report the expected message counts.
	checkSync := func(msgsTest, msgsM uint64) {
		t.Helper()
		checkFor(t, 20*time.Second, 500*time.Millisecond, func() error {
			if tsi, err := js.StreamInfo("TEST"); err != nil {
				return err
			} else if msi, err := js.StreamInfo("M"); err != nil {
				return err
			} else if tsi.State.Msgs != msgsTest {
				return fmt.Errorf("received %d msgs from TEST, expected %d", tsi.State.Msgs, msgsTest)
			} else if msi.State.Msgs != msgsM {
				return fmt.Errorf("received %d msgs from M, expected %d", msi.State.Msgs, msgsM)
			}
			return nil
		})
	}

	// Origin
	_, err := js.AddStream(&nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"foo", "bar"},
		Replicas: 2,
	})
	require_NoError(t, err)
	defer js.DeleteStream("TEST")

	// Create M stream with a single source on "foo"
	_, err = js.AddStream(&nats.StreamConfig{
		Name:     "M",
		Sources:  []*nats.StreamSource{{Name: "TEST", FilterSubject: "foo"}},
		Replicas: 2,
	})
	require_NoError(t, err)
	defer js.DeleteStream("M")

	// Check a message on "bar" doesn't get sourced.
	sendBatch("bar", 100)
	checkSync(100, 0)
	// Check a message on "foo" does get sourced.
	sendBatch("foo", 100)
	checkSync(200, 100)

	// Change: remove the source on "foo" and add a new source on "bar".
	_, err = js.UpdateStream(&nats.StreamConfig{
		Name:     "M",
		Sources:  []*nats.StreamSource{{Name: "TEST", FilterSubject: "bar"}},
		Replicas: 2,
	})
	require_NoError(t, err)

	// As it is a new source (never been sourced before) it starts sourcing at
	// the start of TEST and therefore sources the messages on "bar" already
	// in TEST.
	checkSync(200, 200)

	// New messages on "foo" are being filtered as it's not being currently sourced.
	sendBatch("foo", 100)
	checkSync(300, 200)
	// New messages on "bar" are being sourced.
	sendBatch("bar", 100)
	checkSync(400, 300)

	// Re-add the source for "foo", keep the source on "bar".
	_, err = js.UpdateStream(&nats.StreamConfig{
		Name:     "M",
		Sources:  []*nats.StreamSource{{Name: "TEST", FilterSubject: "bar"}, {Name: "TEST", FilterSubject: "foo"}},
		Replicas: 2,
	})
	require_NoError(t, err)

	// Check the 'backfill' of messages on "foo" that were published while the
	// source was inactive.
	checkSync(400, 400)

	// Full restart causes startingSequenceForSources() to be called.
	nc.Close()
	c.stopAll()
	c.restartAll()
	c.waitOnStreamLeader("$G", "TEST")
	c.waitOnStreamLeader("$G", "M")

	nc, js = jsClientConnect(t, c.randomServer())
	defer nc.Close()

	// Check that it restarted the sources' consumers at the right place.
	checkSync(400, 400)

	// Check both sources are still active.
	sendBatch("bar", 100)
	checkSync(500, 500)
	sendBatch("foo", 100)
	checkSync(600, 600)

	// Check that purging the sourcing stream does not cause re-sourcing of
	// the messages.
	js.PurgeStream("M")
	checkSync(600, 0)

	// Even after a leader change or restart.
	nc.Close()
	c.stopAll()
	c.restartAll()
	c.waitOnStreamLeader("$G", "TEST")
	c.waitOnStreamLeader("$G", "M")

	nc, js = jsClientConnect(t, c.randomServer())
	defer nc.Close()

	checkSync(600, 0)
}
// TestJetStreamClusterSourcesUpdateOriginError updates the origin stream so
// that its subject set no longer overlaps the source's FilterSubject, then
// verifies the sourcing stream simply stops receiving messages (no growth)
// both before and after a full cluster restart.
func TestJetStreamClusterSourcesUpdateOriginError(t *testing.T) {
	c := createJetStreamClusterExplicit(t, "MSR", 5)
	defer c.shutdown()

	// Client for API requests.
	nc, js := jsClientConnect(t, c.randomServer())
	defer nc.Close()

	// sendBatch publishes n "OK" messages to the given subject.
	sendBatch := func(subject string, n int) {
		t.Helper()
		// Send a batch to a given subject.
		for i := 0; i < n; i++ {
			if _, err := js.Publish(subject, []byte("OK")); err != nil {
				t.Fatalf("Unexpected publish error: %v", err)
			}
		}
	}

	// checkSync polls until TEST and M report the expected message counts.
	checkSync := func(msgsTest, msgsM uint64) {
		t.Helper()
		checkFor(t, 10*time.Second, 500*time.Millisecond, func() error {
			if tsi, err := js.StreamInfo("TEST"); err != nil {
				return err
			} else if msi, err := js.StreamInfo("M"); err != nil {
				return err
			} else if tsi.State.Msgs != msgsTest {
				return fmt.Errorf("received %d msgs from TEST, expected %d", tsi.State.Msgs, msgsTest)
			} else if msi.State.Msgs != msgsM {
				return fmt.Errorf("received %d msgs from M, expected %d", msi.State.Msgs, msgsM)
			}
			return nil
		})
	}

	// Origin
	_, err := js.AddStream(&nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"foo"},
		Replicas: 2,
	})
	require_NoError(t, err)

	// Sourcing stream filtered on "foo".
	_, err = js.AddStream(&nats.StreamConfig{
		Name:     "M",
		Sources:  []*nats.StreamSource{{Name: "TEST", FilterSubject: "foo"}},
		Replicas: 2,
	})

	require_NoError(t, err)

	// Send 100 msgs.
	sendBatch("foo", 100)
	checkSync(100, 100)

	// Update makes the source's filter no longer match the origin's subjects.
	_, err = js.UpdateStream(&nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"bar"},
		Replicas: 2,
	})
	require_NoError(t, err)

	// TODO check for downstream error propagation

	// Publishing on "foo" must now fail since TEST no longer listens on it.
	_, err = js.Publish("foo", nil)
	require_Error(t, err)

	sendBatch("bar", 100)
	// The source stream remains at 100 msgs as it still uses foo as its filter.
	checkSync(200, 100)

	nc.Close()
	c.stopAll()
	c.restartAll()
	c.waitOnStreamLeader("$G", "TEST")
	c.waitOnStreamLeader("$G", "M")

	nc, js = jsClientConnect(t, c.randomServer())
	defer nc.Close()

	checkSync(200, 100)

	_, err = js.Publish("foo", nil)
	require_Error(t, err)
	require_Equal(t, err.Error(), "nats: no response from stream")

	sendBatch("bar", 100)
	// The source stream remains at 100 msgs as it still uses foo as its filter.
	checkSync(300, 100)
}
// TestJetStreamClusterMirrorAndSourcesClusterRestart runs a 2x2 matrix
// (mirror vs. source, filtered vs. unfiltered) and checks that the downstream
// stream M keeps the correct message count across a full cluster stop/restart
// and continues to replicate new messages afterwards.
func TestJetStreamClusterMirrorAndSourcesClusterRestart(t *testing.T) {
	test := func(t *testing.T, mirror bool, filter bool) {
		c := createJetStreamClusterExplicit(t, "MSR", 5)
		defer c.shutdown()

		// Client for API requests.
		nc, js := jsClientConnect(t, c.randomServer())
		defer nc.Close()

		// Origin
		_, err := js.AddStream(&nats.StreamConfig{
			Name:     "TEST",
			Subjects: []string{"foo", "bar", "baz.*"},
			Replicas: 2,
		})
		require_NoError(t, err)

		// Empty filter means everything is replicated.
		filterSubj := _EMPTY_
		if filter {
			filterSubj = "foo"
		}

		// Create Mirror/Source now.
		if mirror {
			_, err = js.AddStream(&nats.StreamConfig{
				Name:     "M",
				Mirror:   &nats.StreamSource{Name: "TEST", FilterSubject: filterSubj},
				Replicas: 2,
			})
		} else {
			_, err = js.AddStream(&nats.StreamConfig{
				Name:     "M",
				Sources:  []*nats.StreamSource{{Name: "TEST", FilterSubject: filterSubj}},
				Replicas: 2,
			})
		}
		require_NoError(t, err)

		// Running count of messages M is expected to hold, maintained by sendBatch.
		expectedMsgCount := uint64(0)

		// sendBatch publishes n messages and bumps expectedMsgCount when the
		// subject passes the (possible) filter.
		sendBatch := func(subject string, n int) {
			t.Helper()
			if subject == "foo" || !filter {
				expectedMsgCount += uint64(n)
			}
			// Send a batch to a given subject.
			for i := 0; i < n; i++ {
				if _, err := js.Publish(subject, []byte("OK")); err != nil {
					t.Fatalf("Unexpected publish error: %v", err)
				}
			}
		}

		// checkSync polls until TEST and M report the expected message counts.
		checkSync := func(msgsTest, msgsM uint64) {
			t.Helper()
			checkFor(t, 20*time.Second, 500*time.Millisecond, func() error {
				if tsi, err := js.StreamInfo("TEST"); err != nil {
					return err
				} else if msi, err := js.StreamInfo("M"); err != nil {
					return err
				} else if tsi.State.Msgs != msgsTest {
					return fmt.Errorf("received %d msgs from TEST, expected %d", tsi.State.Msgs, msgsTest)
				} else if msi.State.Msgs != msgsM {
					return fmt.Errorf("received %d msgs from M, expected %d", msi.State.Msgs, msgsM)
				}
				return nil
			})
		}

		sendBatch("foo", 100)
		checkSync(100, expectedMsgCount)
		sendBatch("bar", 100)
		checkSync(200, expectedMsgCount)

		// Full cluster bounce; replication must resume where it left off.
		nc.Close()
		c.stopAll()
		c.restartAll()
		c.waitOnStreamLeader("$G", "TEST")
		c.waitOnStreamLeader("$G", "M")

		nc, js = jsClientConnect(t, c.randomServer())
		defer nc.Close()

		checkSync(200, expectedMsgCount)
		sendBatch("foo", 100)
		checkSync(300, expectedMsgCount)
		sendBatch("bar", 100)
		checkSync(400, expectedMsgCount)
	}
	t.Run("mirror-filter", func(t *testing.T) {
		test(t, true, true)
	})
	t.Run("mirror-nofilter", func(t *testing.T) {
		test(t, true, false)
	})
	t.Run("source-filter", func(t *testing.T) {
		test(t, false, true)
	})
	t.Run("source-nofilter", func(t *testing.T) {
		test(t, false, false)
	})
}
// TestJetStreamClusterMirrorAndSourcesFilteredConsumers verifies that
// filtered consumers can be created on mirror and sourcing streams (where the
// filter cannot be validated against the stream's own subjects), that invalid
// filter subjects are rejected, and that the same holds for a stream sourcing
// from another account via External prefixes (where subject checks are
// skipped).
func TestJetStreamClusterMirrorAndSourcesFilteredConsumers(t *testing.T) {
	c := createJetStreamClusterWithTemplate(t, jsClusterMirrorSourceImportsTempl, "MS5", 5)
	defer c.shutdown()

	// Client for API requests.
	s := c.randomServer()
	nc, js := jsClientConnect(t, s)
	defer nc.Close()

	// Origin
	_, err := js.AddStream(&nats.StreamConfig{
		Name:     "TEST",
		Subjects: []string{"foo", "bar", "baz.*"},
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Create Mirror now.
	_, err = js.AddStream(&nats.StreamConfig{
		Name:   "M",
		Mirror: &nats.StreamSource{Name: "TEST"},
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Shared delivery subject for all push consumers created below.
	dsubj := nats.NewInbox()
	nc.SubscribeSync(dsubj)
	nc.Flush()

	// createConsumer expects AddConsumer with the given filter to succeed.
	// NOTE: both helpers assign to the outer err on purpose.
	createConsumer := func(sn, fs string) {
		t.Helper()
		_, err = js.AddConsumer(sn, &nats.ConsumerConfig{DeliverSubject: dsubj, FilterSubject: fs})
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
	}
	// expectFail expects AddConsumer with the given filter to be rejected.
	expectFail := func(sn, fs string) {
		t.Helper()
		_, err = js.AddConsumer(sn, &nats.ConsumerConfig{DeliverSubject: dsubj, FilterSubject: fs})
		if err == nil {
			t.Fatalf("Expected error but got none")
		}
	}

	createConsumer("M", "foo")
	createConsumer("M", "bar")
	createConsumer("M", "baz.foo")
	// Malformed subjects must be rejected.
	expectFail("M", ".")
	expectFail("M", ">.foo")

	// Make sure wider scoped subjects work as well.
	createConsumer("M", "*")
	createConsumer("M", ">")

	// Now do some sources.
	if _, err := js.AddStream(&nats.StreamConfig{Name: "O1", Subjects: []string{"foo.*"}}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if _, err := js.AddStream(&nats.StreamConfig{Name: "O2", Subjects: []string{"bar.*"}}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Create downstream now.
	_, err = js.AddStream(&nats.StreamConfig{
		Name:    "S",
		Sources: []*nats.StreamSource{{Name: "O1"}, {Name: "O2"}},
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	createConsumer("S", "foo.1")
	createConsumer("S", "bar.1")

	// Now cross account stuff.
	nc2, js2 := jsClientConnect(t, s, nats.UserInfo("rip", "pass"))
	defer nc2.Close()

	if _, err := js2.AddStream(&nats.StreamConfig{Name: "ORIGIN", Subjects: []string{"foo.*"}}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Raw API request since the Go client config can't express External here.
	cfg := StreamConfig{
		Name:    "SCA",
		Storage: FileStorage,
		Sources: []*StreamSource{{
			Name: "ORIGIN",
			External: &ExternalStream{
				ApiPrefix:     "RI.JS.API",
				DeliverPrefix: "RI.DELIVER.SYNC.SOURCES",
			},
		}},
	}
	req, err := json.Marshal(cfg)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	resp, err := nc.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	var scResp JSApiStreamCreateResponse
	if err := json.Unmarshal(resp.Data, &scResp); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if scResp.StreamInfo == nil || scResp.Error != nil {
		t.Fatalf("Did not receive correct response: %+v", scResp.Error)
	}

	// Externals skip the checks for now.
	createConsumer("SCA", "foo.1")
	createConsumer("SCA", "bar.1")
	createConsumer("SCA", "baz")
}
5805 createConsumer("SCA", "foo.1") 5806 createConsumer("SCA", "bar.1") 5807 createConsumer("SCA", "baz") 5808 } 5809 5810 func TestJetStreamClusterCrossAccountMirrorsAndSources(t *testing.T) { 5811 c := createJetStreamClusterWithTemplate(t, jsClusterMirrorSourceImportsTempl, "C1", 3) 5812 defer c.shutdown() 5813 5814 // Create source stream under RI account. 5815 s := c.randomServer() 5816 nc, js := jsClientConnect(t, s, nats.UserInfo("rip", "pass")) 5817 defer nc.Close() 5818 5819 if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 2}); err != nil { 5820 t.Fatalf("Unexpected error: %v", err) 5821 } 5822 5823 // use large number to tease out FC issues 5824 toSend := 3000 5825 for i := 0; i < toSend; i++ { 5826 if _, err := js.Publish("TEST", []byte("OK")); err != nil { 5827 t.Fatalf("Unexpected publish error: %v", err) 5828 } 5829 } 5830 5831 nc2, js2 := jsClientConnect(t, s) 5832 defer nc2.Close() 5833 5834 // Have to do this direct until we get Go client support. 5835 // Need to match jsClusterMirrorSourceImportsTempl imports. 5836 _, err := js2.AddStream(&nats.StreamConfig{ 5837 Name: "MY_MIRROR_TEST", 5838 Mirror: &nats.StreamSource{ 5839 Name: "TEST", 5840 External: &nats.ExternalStream{ 5841 APIPrefix: "RI.JS.API", 5842 DeliverPrefix: "RI.DELIVER.SYNC.MIRRORS", 5843 }, 5844 }, 5845 }) 5846 if err != nil { 5847 t.Fatalf("Unexpected error: %v", err) 5848 } 5849 5850 checkFor(t, 20*time.Second, 500*time.Millisecond, func() error { 5851 si, err := js2.StreamInfo("MY_MIRROR_TEST") 5852 if err != nil { 5853 t.Fatalf("Could not retrieve stream info: %s", err) 5854 } 5855 if si.State.Msgs != uint64(toSend) { 5856 return fmt.Errorf("Expected %d msgs, got state: %+v", toSend, si.State) 5857 } 5858 return nil 5859 }) 5860 5861 // Now do sources as well. 
// TestJetStreamClusterFailMirrorsAndSources verifies that creating a mirror
// or source whose External.ApiPrefix overlaps the local $JS.API prefix is
// rejected with JSStreamExternalApiOverlapErrF.
func TestJetStreamClusterFailMirrorsAndSources(t *testing.T) {
	c := createJetStreamClusterWithTemplate(t, jsClusterMirrorSourceImportsTempl, "C1", 3)
	defer c.shutdown()

	// Create source stream under RI account.
	s := c.randomServer()
	nc, js := jsClientConnect(t, s, nats.UserInfo("rip", "pass"))
	defer nc.Close()

	if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 2, Subjects: []string{"test.>"}}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Raw connection to issue stream-create requests directly.
	nc2, _ := jsClientConnect(t, s, nats.UserInfo("rip", "pass"))
	defer nc2.Close()

	// testPrefix sends a raw stream-create request for cfg and expects it to
	// fail with the given API error identifier.
	testPrefix := func(testName string, id ErrorIdentifier, cfg StreamConfig) {
		t.Run(testName, func(t *testing.T) {
			req, err := json.Marshal(cfg)
			if err != nil {
				t.Fatalf("Unexpected error: %v", err)
			}
			resp, err := nc2.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second)
			if err != nil {
				t.Fatalf("Unexpected error: %v", err)
			}
			var scResp JSApiStreamCreateResponse
			if err := json.Unmarshal(resp.Data, &scResp); err != nil {
				t.Fatalf("Unexpected error: %v", err)
			}
			if scResp.Error == nil {
				t.Fatalf("Did expect an error but got none")
			} else if !IsNatsErr(scResp.Error, id) {
				t.Fatalf("Expected different error: %s", scResp.Error.Description)
			}
		})
	}

	testPrefix("mirror-bad-apiprefix", JSStreamExternalApiOverlapErrF, StreamConfig{
		Name:    "MY_MIRROR_TEST",
		Storage: FileStorage,
		Mirror: &StreamSource{
			Name: "TEST",
			External: &ExternalStream{
				ApiPrefix:     "$JS.API",
				DeliverPrefix: "here",
			},
		},
	})
	testPrefix("source-bad-apiprefix", JSStreamExternalApiOverlapErrF, StreamConfig{
		Name:    "MY_SOURCE_TEST",
		Storage: FileStorage,
		Sources: []*StreamSource{{
			Name: "TEST",
			External: &ExternalStream{
				ApiPrefix:     "$JS.API",
				DeliverPrefix: "here",
			},
		},
		},
	})
}

//
// DO NOT ADD NEW TESTS IN THIS FILE
// Add at the end of jetstream_cluster_<n>_test.go, with <n> being the highest value.
//