get.pme.sh/pnats@v0.0.0-20240304004023-26bb5a137ed0/server/leafnode_test.go

// Copyright 2019-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package server

import (
	"bufio"
	"bytes"
	"context"
	"crypto/tls"
	"fmt"
	"math/rand"
	"net"
	"net/http"
	"net/http/httptest"
	"net/url"
	"reflect"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/nats-io/nkeys"

	"github.com/klauspost/compress/s2"
	jwt "github.com/nats-io/jwt/v2"
	"github.com/nats-io/nats.go"

	"get.pme.sh/pnats/internal/testhelper"
)

type captureLeafNodeRandomIPLogger struct {
	DummyLogger
	ch  chan struct{}
	ips [3]int
}

func (c *captureLeafNodeRandomIPLogger) Debugf(format string, v ...interface{}) {
	msg := fmt.Sprintf(format, v...)
	if strings.Contains(msg, "hostname_to_resolve") {
		ippos := strings.Index(msg, "127.0.0.")
		if ippos != -1 {
			n := int(msg[ippos+8] - '1')
			c.ips[n]++
			for _, v := range c.ips {
				if v < 2 {
					return
				}
			}
			// All IPs got at least some hit, we are done.
			c.ch <- struct{}{}
		}
	}
}

func TestLeafNodeRandomIP(t *testing.T) {
	u, err := url.Parse("nats://hostname_to_resolve:1234")
	if err != nil {
		t.Fatalf("Error parsing: %v", err)
	}

	resolver := &myDummyDNSResolver{ips: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3"}}

	o := DefaultOptions()
	o.Host = "127.0.0.1"
	o.Port = -1
	o.LeafNode.Port = 0
	o.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{u}}}
	o.LeafNode.ReconnectInterval = 50 * time.Millisecond
	o.LeafNode.resolver = resolver
	o.LeafNode.dialTimeout = 15 * time.Millisecond
	s := RunServer(o)
	defer s.Shutdown()

	l := &captureLeafNodeRandomIPLogger{ch: make(chan struct{})}
	s.SetLogger(l, true, true)

	select {
	case <-l.ch:
	case <-time.After(3 * time.Second):
		t.Fatalf("Does not seem to have used random IPs")
	}
}

func TestLeafNodeRandomRemotes(t *testing.T) {
	// 16! possible permutations.
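	// Two remotes get the same 16-URL list: the first sets NoRandomize and must keep
	// the configured order, while the second should end up shuffled.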
98 orderedURLs := make([]*url.URL, 0, 16) 99 for i := 0; i < cap(orderedURLs); i++ { 100 orderedURLs = append(orderedURLs, &url.URL{ 101 Scheme: "nats-leaf", 102 Host: fmt.Sprintf("host%d:7422", i), 103 }) 104 } 105 106 o := DefaultOptions() 107 o.LeafNode.Remotes = []*RemoteLeafOpts{ 108 {NoRandomize: true}, 109 {NoRandomize: false}, 110 } 111 o.LeafNode.Remotes[0].URLs = make([]*url.URL, cap(orderedURLs)) 112 copy(o.LeafNode.Remotes[0].URLs, orderedURLs) 113 o.LeafNode.Remotes[1].URLs = make([]*url.URL, cap(orderedURLs)) 114 copy(o.LeafNode.Remotes[1].URLs, orderedURLs) 115 116 s := RunServer(o) 117 defer s.Shutdown() 118 119 s.mu.Lock() 120 r1 := s.leafRemoteCfgs[0] 121 r2 := s.leafRemoteCfgs[1] 122 s.mu.Unlock() 123 124 r1.RLock() 125 gotOrdered := r1.urls 126 r1.RUnlock() 127 if got, want := len(gotOrdered), len(orderedURLs); got != want { 128 t.Fatalf("Unexpected rem0 len URLs, got %d, want %d", got, want) 129 } 130 131 // These should be IN order. 132 for i := range orderedURLs { 133 if got, want := gotOrdered[i].String(), orderedURLs[i].String(); got != want { 134 t.Fatalf("Unexpected ordered url, got %s, want %s", got, want) 135 } 136 } 137 138 r2.RLock() 139 gotRandom := r2.urls 140 r2.RUnlock() 141 if got, want := len(gotRandom), len(orderedURLs); got != want { 142 t.Fatalf("Unexpected rem1 len URLs, got %d, want %d", got, want) 143 } 144 145 // These should be OUT of order. 146 var random bool 147 for i := range orderedURLs { 148 if gotRandom[i].String() != orderedURLs[i].String() { 149 random = true 150 break 151 } 152 } 153 if !random { 154 t.Fatal("Expected urls to be random") 155 } 156 } 157 158 type testLoopbackResolver struct{} 159 160 func (r *testLoopbackResolver) LookupHost(ctx context.Context, host string) ([]string, error) { 161 return []string{"127.0.0.1"}, nil 162 } 163 164 func TestLeafNodeTLSWithCerts(t *testing.T) { 165 conf1 := createConfFile(t, []byte(` 166 port: -1 167 leaf { 168 listen: "127.0.0.1:-1" 169 tls { 170 ca_file: "../test/configs/certs/tlsauth/ca.pem" 171 cert_file: "../test/configs/certs/tlsauth/server.pem" 172 key_file: "../test/configs/certs/tlsauth/server-key.pem" 173 timeout: 2 174 } 175 } 176 `)) 177 s1, o1 := RunServerWithConfig(conf1) 178 defer s1.Shutdown() 179 180 u, err := url.Parse(fmt.Sprintf("nats://localhost:%d", o1.LeafNode.Port)) 181 if err != nil { 182 t.Fatalf("Error parsing url: %v", err) 183 } 184 conf2 := createConfFile(t, []byte(fmt.Sprintf(` 185 port: -1 186 leaf { 187 remotes [ 188 { 189 url: "%s" 190 tls { 191 ca_file: "../test/configs/certs/tlsauth/ca.pem" 192 cert_file: "../test/configs/certs/tlsauth/client.pem" 193 key_file: "../test/configs/certs/tlsauth/client-key.pem" 194 timeout: 2 195 } 196 } 197 ] 198 } 199 `, u.String()))) 200 o2, err := ProcessConfigFile(conf2) 201 if err != nil { 202 t.Fatalf("Error processing config file: %v", err) 203 } 204 o2.NoLog, o2.NoSigs = true, true 205 o2.LeafNode.resolver = &testLoopbackResolver{} 206 s2 := RunServer(o2) 207 defer s2.Shutdown() 208 209 checkFor(t, 3*time.Second, 10*time.Millisecond, func() error { 210 if nln := s1.NumLeafNodes(); nln != 1 { 211 return fmt.Errorf("Number of leaf nodes is %d", nln) 212 } 213 return nil 214 }) 215 } 216 217 func TestLeafNodeTLSRemoteWithNoCerts(t *testing.T) { 218 conf1 := createConfFile(t, []byte(` 219 port: -1 220 leaf { 221 listen: "127.0.0.1:-1" 222 tls { 223 ca_file: "../test/configs/certs/tlsauth/ca.pem" 224 cert_file: "../test/configs/certs/tlsauth/server.pem" 225 key_file: "../test/configs/certs/tlsauth/server-key.pem" 226 
timeout: 2 227 } 228 } 229 `)) 230 s1, o1 := RunServerWithConfig(conf1) 231 defer s1.Shutdown() 232 233 u, err := url.Parse(fmt.Sprintf("nats://localhost:%d", o1.LeafNode.Port)) 234 if err != nil { 235 t.Fatalf("Error parsing url: %v", err) 236 } 237 conf2 := createConfFile(t, []byte(fmt.Sprintf(` 238 port: -1 239 leaf { 240 remotes [ 241 { 242 url: "%s" 243 tls { 244 ca_file: "../test/configs/certs/tlsauth/ca.pem" 245 timeout: 5 246 } 247 } 248 ] 249 } 250 `, u.String()))) 251 o2, err := ProcessConfigFile(conf2) 252 if err != nil { 253 t.Fatalf("Error processing config file: %v", err) 254 } 255 256 if len(o2.LeafNode.Remotes) == 0 { 257 t.Fatal("Expected at least a single leaf remote") 258 } 259 260 var ( 261 got float64 = o2.LeafNode.Remotes[0].TLSTimeout 262 expected float64 = 5 263 ) 264 if got != expected { 265 t.Fatalf("Expected %v, got: %v", expected, got) 266 } 267 o2.NoLog, o2.NoSigs = true, true 268 o2.LeafNode.resolver = &testLoopbackResolver{} 269 s2 := RunServer(o2) 270 defer s2.Shutdown() 271 272 checkFor(t, 3*time.Second, 10*time.Millisecond, func() error { 273 if nln := s1.NumLeafNodes(); nln != 1 { 274 return fmt.Errorf("Number of leaf nodes is %d", nln) 275 } 276 return nil 277 }) 278 279 // Here we only process options without starting the server 280 // and without a root CA for the remote. 281 conf3 := createConfFile(t, []byte(fmt.Sprintf(` 282 port: -1 283 leaf { 284 remotes [ 285 { 286 url: "%s" 287 tls { 288 timeout: 10 289 } 290 } 291 ] 292 } 293 `, u.String()))) 294 o3, err := ProcessConfigFile(conf3) 295 if err != nil { 296 t.Fatalf("Error processing config file: %v", err) 297 } 298 299 if len(o3.LeafNode.Remotes) == 0 { 300 t.Fatal("Expected at least a single leaf remote") 301 } 302 got = o3.LeafNode.Remotes[0].TLSTimeout 303 expected = 10 304 if got != expected { 305 t.Fatalf("Expected %v, got: %v", expected, got) 306 } 307 308 // Here we only process options without starting the server 309 // and check the default for leafnode remotes. 
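// With a tls block that has no explicit timeout, the remote should fall back to DEFAULT_LEAF_TLS_TIMEOUT (compared in whole seconds below).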
310 conf4 := createConfFile(t, []byte(fmt.Sprintf(` 311 port: -1 312 leaf { 313 remotes [ 314 { 315 url: "%s" 316 tls { 317 ca_file: "../test/configs/certs/tlsauth/ca.pem" 318 } 319 } 320 ] 321 } 322 `, u.String()))) 323 o4, err := ProcessConfigFile(conf4) 324 if err != nil { 325 t.Fatalf("Error processing config file: %v", err) 326 } 327 328 if len(o4.LeafNode.Remotes) == 0 { 329 t.Fatal("Expected at least a single leaf remote") 330 } 331 got = o4.LeafNode.Remotes[0].TLSTimeout 332 expected = float64(DEFAULT_LEAF_TLS_TIMEOUT) / float64(time.Second) 333 if int(got) != int(expected) { 334 t.Fatalf("Expected %v, got: %v", expected, got) 335 } 336 } 337 338 type captureErrorLogger struct { 339 DummyLogger 340 errCh chan string 341 } 342 343 func (l *captureErrorLogger) Errorf(format string, v ...interface{}) { 344 select { 345 case l.errCh <- fmt.Sprintf(format, v...): 346 default: 347 } 348 } 349 350 func TestLeafNodeAccountNotFound(t *testing.T) { 351 ob := DefaultOptions() 352 ob.LeafNode.Host = "127.0.0.1" 353 ob.LeafNode.Port = -1 354 sb := RunServer(ob) 355 defer sb.Shutdown() 356 357 u, _ := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", ob.LeafNode.Port)) 358 359 oa := DefaultOptions() 360 oa.Cluster.Name = "xyz" 361 oa.LeafNode.ReconnectInterval = 10 * time.Millisecond 362 oa.LeafNode.Remotes = []*RemoteLeafOpts{ 363 { 364 LocalAccount: "foo", 365 URLs: []*url.URL{u}, 366 }, 367 } 368 // Expected to fail 369 if _, err := NewServer(oa); err == nil || !strings.Contains(err.Error(), "local account") { 370 t.Fatalf("Expected server to fail with error about no local account, got %v", err) 371 } 372 oa.Accounts = []*Account{NewAccount("foo")} 373 sa := RunServer(oa) 374 defer sa.Shutdown() 375 376 l := &captureErrorLogger{errCh: make(chan string, 1)} 377 sa.SetLogger(l, false, false) 378 379 checkLeafNodeConnected(t, sa) 380 381 // Now simulate account is removed with config reload, or it expires. 382 sa.accounts.Delete("foo") 383 384 // Restart B (with same Port) 385 sb.Shutdown() 386 sb = RunServer(ob) 387 defer sb.Shutdown() 388 389 // Wait for report of error 390 select { 391 case e := <-l.errCh: 392 if !strings.Contains(e, "Unable to lookup account") { 393 t.Fatalf("Expected error about no local account, got %s", e) 394 } 395 case <-time.After(2 * time.Second): 396 t.Fatalf("Did not get the error") 397 } 398 399 // TODO below test is bogus. Instead add the account, do a reload, and make sure the connection works. 400 401 // For now, sa would try to recreate the connection for ever. 402 // Check that lid is increasing... 403 checkFor(t, 2*time.Second, 100*time.Millisecond, func() error { 404 if lid := atomic.LoadUint64(&sa.gcid); lid < 3 { 405 return fmt.Errorf("Seems like connection was not retried, lid currently only at %d", lid) 406 } 407 return nil 408 }) 409 } 410 411 // This test ensures that we can connect using proper user/password 412 // to a LN URL that was discovered through the INFO protocol. 413 // We also check that the password doesn't leak to debug/trace logs. 414 func TestLeafNodeBasicAuthFailover(t *testing.T) { 415 // Something a little longer than "pwd" to prevent false positives amongst many log lines; 416 // don't make it complex enough to be subject to %-escaping, we want a simple needle search. 
417 fatalPassword := "pwdfatal" 418 419 content := ` 420 listen: "127.0.0.1:-1" 421 cluster { 422 name: "abc" 423 listen: "127.0.0.1:-1" 424 %s 425 } 426 leafnodes { 427 listen: "127.0.0.1:-1" 428 authorization { 429 user: foo 430 password: %s 431 timeout: 1 432 } 433 } 434 ` 435 conf := createConfFile(t, []byte(fmt.Sprintf(content, "", fatalPassword))) 436 437 sb1, ob1 := RunServerWithConfig(conf) 438 defer sb1.Shutdown() 439 440 conf = createConfFile(t, []byte(fmt.Sprintf(content, fmt.Sprintf("routes: [nats://127.0.0.1:%d]", ob1.Cluster.Port), fatalPassword))) 441 442 sb2, _ := RunServerWithConfig(conf) 443 defer sb2.Shutdown() 444 445 checkClusterFormed(t, sb1, sb2) 446 447 content = ` 448 port: -1 449 accounts { 450 foo {} 451 } 452 leafnodes { 453 listen: "127.0.0.1:-1" 454 remotes [ 455 { 456 account: "foo" 457 url: "nats://foo:%s@127.0.0.1:%d" 458 } 459 ] 460 } 461 ` 462 conf = createConfFile(t, []byte(fmt.Sprintf(content, fatalPassword, ob1.LeafNode.Port))) 463 464 sa, _ := RunServerWithConfig(conf) 465 defer sa.Shutdown() 466 467 l := testhelper.NewDummyLogger(100) 468 sa.SetLogger(l, true, true) // we want debug & trace logs, to check for passwords in them 469 470 checkLeafNodeConnected(t, sa) 471 472 // Shutdown sb1, sa should reconnect to sb2 473 sb1.Shutdown() 474 475 // Wait a bit to make sure there was a disconnect and attempt to reconnect. 476 time.Sleep(250 * time.Millisecond) 477 478 // Should be able to reconnect 479 checkLeafNodeConnected(t, sa) 480 481 // Look at all our logs for the password; at time of writing it doesn't appear 482 // but we want to safe-guard against it. 483 l.CheckForProhibited(t, "fatal password", fatalPassword) 484 } 485 486 func TestLeafNodeRTT(t *testing.T) { 487 ob := DefaultOptions() 488 ob.PingInterval = 15 * time.Millisecond 489 ob.LeafNode.Host = "127.0.0.1" 490 ob.LeafNode.Port = -1 491 sb := RunServer(ob) 492 defer sb.Shutdown() 493 494 lnBURL, _ := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", ob.LeafNode.Port)) 495 oa := DefaultOptions() 496 oa.Cluster.Name = "xyz" 497 oa.PingInterval = 15 * time.Millisecond 498 oa.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{lnBURL}}} 499 sa := RunServer(oa) 500 defer sa.Shutdown() 501 502 checkLeafNodeConnected(t, sa) 503 504 checkRTT := func(t *testing.T, s *Server) time.Duration { 505 t.Helper() 506 var ln *client 507 s.mu.Lock() 508 for _, l := range s.leafs { 509 ln = l 510 break 511 } 512 s.mu.Unlock() 513 var rtt time.Duration 514 checkFor(t, 2*firstPingInterval, 15*time.Millisecond, func() error { 515 ln.mu.Lock() 516 rtt = ln.rtt 517 ln.mu.Unlock() 518 if rtt == 0 { 519 return fmt.Errorf("RTT not tracked") 520 } 521 return nil 522 }) 523 return rtt 524 } 525 526 prevA := checkRTT(t, sa) 527 prevB := checkRTT(t, sb) 528 529 // Wait to see if RTT is updated 530 checkUpdated := func(t *testing.T, s *Server, prev time.Duration) { 531 attempts := 0 532 timeout := time.Now().Add(2 * firstPingInterval) 533 for time.Now().Before(timeout) { 534 if rtt := checkRTT(t, s); rtt != prev { 535 return 536 } 537 attempts++ 538 if attempts == 5 { 539 s.mu.Lock() 540 for _, ln := range s.leafs { 541 ln.mu.Lock() 542 ln.rtt = 0 543 ln.mu.Unlock() 544 break 545 } 546 s.mu.Unlock() 547 } 548 time.Sleep(15 * time.Millisecond) 549 } 550 t.Fatalf("RTT probably not updated") 551 } 552 checkUpdated(t, sa, prevA) 553 checkUpdated(t, sb, prevB) 554 555 sa.Shutdown() 556 sb.Shutdown() 557 558 // Now check that initial RTT is computed prior to first PingInterval 559 // Get new options to avoid possible race 
changing the ping interval. 560 ob = DefaultOptions() 561 ob.Cluster.Name = "xyz" 562 ob.PingInterval = time.Minute 563 ob.LeafNode.Host = "127.0.0.1" 564 ob.LeafNode.Port = -1 565 sb = RunServer(ob) 566 defer sb.Shutdown() 567 568 lnBURL, _ = url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", ob.LeafNode.Port)) 569 oa = DefaultOptions() 570 oa.PingInterval = time.Minute 571 oa.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{lnBURL}}} 572 sa = RunServer(oa) 573 defer sa.Shutdown() 574 575 checkLeafNodeConnected(t, sa) 576 577 checkRTT(t, sa) 578 checkRTT(t, sb) 579 } 580 581 func TestLeafNodeValidateAuthOptions(t *testing.T) { 582 opts := DefaultOptions() 583 opts.LeafNode.Username = "user1" 584 opts.LeafNode.Password = "pwd" 585 opts.LeafNode.Users = []*User{{Username: "user", Password: "pwd"}} 586 if _, err := NewServer(opts); err == nil || !strings.Contains(err.Error(), 587 "can not have a single user/pass and a users array") { 588 t.Fatalf("Expected error about mixing single/multi users, got %v", err) 589 } 590 591 // Check duplicate user names 592 opts.LeafNode.Username = _EMPTY_ 593 opts.LeafNode.Password = _EMPTY_ 594 opts.LeafNode.Users = append(opts.LeafNode.Users, &User{Username: "user", Password: "pwd"}) 595 if _, err := NewServer(opts); err == nil || !strings.Contains(err.Error(), "duplicate user") { 596 t.Fatalf("Expected error about duplicate user, got %v", err) 597 } 598 } 599 600 func TestLeafNodeBasicAuthSingleton(t *testing.T) { 601 opts := DefaultOptions() 602 opts.LeafNode.Port = -1 603 opts.LeafNode.Account = "unknown" 604 if s, err := NewServer(opts); err == nil || !strings.Contains(err.Error(), "cannot find") { 605 if s != nil { 606 s.Shutdown() 607 } 608 t.Fatalf("Expected error about account not found, got %v", err) 609 } 610 611 template := ` 612 port: -1 613 accounts: { 614 ACC1: { users = [{user: "user1", password: "user1"}] } 615 ACC2: { users = [{user: "user2", password: "user2"}] } 616 } 617 leafnodes: { 618 port: -1 619 authorization { 620 %s 621 account: "ACC1" 622 } 623 } 624 ` 625 for iter, test := range []struct { 626 name string 627 userSpec string 628 lnURLCreds string 629 shouldFail bool 630 }{ 631 {"no user creds required and no user so binds to ACC1", "", "", false}, 632 {"no user creds required and pick user2 associated to ACC2", "", "user2:user2@", false}, 633 {"no user creds required and unknown user should fail", "", "unknown:user@", true}, 634 {"user creds required so binds to ACC1", "user: \"ln\"\npass: \"pwd\"", "ln:pwd@", false}, 635 } { 636 t.Run(test.name, func(t *testing.T) { 637 638 conf := createConfFile(t, []byte(fmt.Sprintf(template, test.userSpec))) 639 s1, o1 := RunServerWithConfig(conf) 640 defer s1.Shutdown() 641 642 // Create a sub on "foo" for account ACC1 (user user1), which is the one 643 // bound to the accepted LN connection. 644 ncACC1 := natsConnect(t, fmt.Sprintf("nats://user1:user1@%s:%d", o1.Host, o1.Port)) 645 defer ncACC1.Close() 646 sub1 := natsSubSync(t, ncACC1, "foo") 647 natsFlush(t, ncACC1) 648 649 // Create a sub on "foo" for account ACC2 (user user2). This one should 650 // not receive any message. 
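// Whether sub1 (ACC1) or the sub2 created below (ACC2) sees the message published over the leafnode depends on which hub account the remote ends up bound to.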
651 ncACC2 := natsConnect(t, fmt.Sprintf("nats://user2:user2@%s:%d", o1.Host, o1.Port)) 652 defer ncACC2.Close() 653 sub2 := natsSubSync(t, ncACC2, "foo") 654 natsFlush(t, ncACC2) 655 656 conf = createConfFile(t, []byte(fmt.Sprintf(` 657 port: -1 658 leafnodes: { 659 remotes = [ { url: "nats-leaf://%s%s:%d" } ] 660 } 661 `, test.lnURLCreds, o1.LeafNode.Host, o1.LeafNode.Port))) 662 s2, _ := RunServerWithConfig(conf) 663 defer s2.Shutdown() 664 665 if test.shouldFail { 666 // Wait a bit and ensure that there is no leaf node connection 667 time.Sleep(100 * time.Millisecond) 668 checkFor(t, time.Second, 15*time.Millisecond, func() error { 669 if n := s1.NumLeafNodes(); n != 0 { 670 return fmt.Errorf("Expected no leafnode connection, got %v", n) 671 } 672 return nil 673 }) 674 return 675 } 676 677 checkLeafNodeConnected(t, s2) 678 679 nc := natsConnect(t, s2.ClientURL()) 680 defer nc.Close() 681 natsPub(t, nc, "foo", []byte("hello")) 682 // If url contains known user, even when there is no credentials 683 // required, the connection will be bound to the user's account. 684 if iter == 1 { 685 // Should not receive on "ACC1", but should on "ACC2" 686 if _, err := sub1.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 687 t.Fatalf("Expected timeout error, got %v", err) 688 } 689 natsNexMsg(t, sub2, time.Second) 690 } else { 691 // Should receive on "ACC1"... 692 natsNexMsg(t, sub1, time.Second) 693 // but not received on "ACC2" since leafnode bound to account "ACC1". 694 if _, err := sub2.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 695 t.Fatalf("Expected timeout error, got %v", err) 696 } 697 } 698 }) 699 } 700 } 701 702 func TestLeafNodeBasicAuthMultiple(t *testing.T) { 703 conf := createConfFile(t, []byte(` 704 port: -1 705 accounts: { 706 S1ACC1: { users = [{user: "user1", password: "user1"}] } 707 S1ACC2: { users = [{user: "user2", password: "user2"}] } 708 } 709 leafnodes: { 710 port: -1 711 authorization { 712 users = [ 713 {user: "ln1", password: "ln1", account: "S1ACC1"} 714 {user: "ln2", password: "ln2", account: "S1ACC2"} 715 {user: "ln3", password: "ln3"} 716 ] 717 } 718 } 719 `)) 720 s1, o1 := RunServerWithConfig(conf) 721 defer s1.Shutdown() 722 723 // Make sure that we reject a LN connection if user does not match 724 conf = createConfFile(t, []byte(fmt.Sprintf(` 725 port: -1 726 leafnodes: { 727 remotes = [{url: "nats-leaf://wron:user@%s:%d"}] 728 } 729 `, o1.LeafNode.Host, o1.LeafNode.Port))) 730 s2, _ := RunServerWithConfig(conf) 731 defer s2.Shutdown() 732 // Give a chance for s2 to attempt to connect and make sure that s1 733 // did not register a LN connection. 734 time.Sleep(100 * time.Millisecond) 735 if n := s1.NumLeafNodes(); n != 0 { 736 t.Fatalf("Expected no leafnode connection, got %v", n) 737 } 738 s2.Shutdown() 739 740 ncACC1 := natsConnect(t, fmt.Sprintf("nats://user1:user1@%s:%d", o1.Host, o1.Port)) 741 defer ncACC1.Close() 742 sub1 := natsSubSync(t, ncACC1, "foo") 743 natsFlush(t, ncACC1) 744 745 ncACC2 := natsConnect(t, fmt.Sprintf("nats://user2:user2@%s:%d", o1.Host, o1.Port)) 746 defer ncACC2.Close() 747 sub2 := natsSubSync(t, ncACC2, "foo") 748 natsFlush(t, ncACC2) 749 750 // We will start s2 with 2 LN connections that should bind local account S2ACC1 751 // to account S1ACC1 and S2ACC2 to account S1ACC2 on s1. 
752 conf = createConfFile(t, []byte(fmt.Sprintf(` 753 port: -1 754 accounts { 755 S2ACC1 { users = [{user: "user1", password: "user1"}] } 756 S2ACC2 { users = [{user: "user2", password: "user2"}] } 757 } 758 leafnodes: { 759 remotes = [ 760 { 761 url: "nats-leaf://ln1:ln1@%s:%d" 762 account: "S2ACC1" 763 } 764 { 765 url: "nats-leaf://ln2:ln2@%s:%d" 766 account: "S2ACC2" 767 } 768 ] 769 } 770 `, o1.LeafNode.Host, o1.LeafNode.Port, o1.LeafNode.Host, o1.LeafNode.Port))) 771 s2, o2 := RunServerWithConfig(conf) 772 defer s2.Shutdown() 773 774 checkFor(t, 5*time.Second, 100*time.Millisecond, func() error { 775 if nln := s2.NumLeafNodes(); nln != 2 { 776 return fmt.Errorf("Expected 2 connected leafnodes for server %q, got %d", s2.ID(), nln) 777 } 778 return nil 779 }) 780 781 // Create a user connection on s2 that binds to S2ACC1 (use user1). 782 nc1 := natsConnect(t, fmt.Sprintf("nats://user1:user1@%s:%d", o2.Host, o2.Port)) 783 defer nc1.Close() 784 785 // Create an user connection on s2 that binds to S2ACC2 (use user2). 786 nc2 := natsConnect(t, fmt.Sprintf("nats://user2:user2@%s:%d", o2.Host, o2.Port)) 787 defer nc2.Close() 788 789 // Now if a message is published from nc1, sub1 should receive it since 790 // their account are bound together. 791 natsPub(t, nc1, "foo", []byte("hello")) 792 natsNexMsg(t, sub1, time.Second) 793 // But sub2 should not receive it since different account. 794 if _, err := sub2.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 795 t.Fatalf("Expected timeout error, got %v", err) 796 } 797 798 // Now use nc2 (S2ACC2) to publish 799 natsPub(t, nc2, "foo", []byte("hello")) 800 // Expect sub2 to receive and sub1 not to. 801 natsNexMsg(t, sub2, time.Second) 802 if _, err := sub1.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 803 t.Fatalf("Expected timeout error, got %v", err) 804 } 805 806 // Now check that we don't panic if no account is specified for 807 // a given user. 808 conf = createConfFile(t, []byte(fmt.Sprintf(` 809 port: -1 810 leafnodes: { 811 remotes = [ 812 { url: "nats-leaf://ln3:ln3@%s:%d" } 813 ] 814 } 815 `, o1.LeafNode.Host, o1.LeafNode.Port))) 816 s3, _ := RunServerWithConfig(conf) 817 defer s3.Shutdown() 818 } 819 820 type loopDetectedLogger struct { 821 DummyLogger 822 ch chan string 823 } 824 825 func (l *loopDetectedLogger) Errorf(format string, v ...interface{}) { 826 msg := fmt.Sprintf(format, v...) 827 if strings.Contains(msg, "Loop") { 828 select { 829 case l.ch <- msg: 830 default: 831 } 832 } 833 } 834 835 func TestLeafNodeLoop(t *testing.T) { 836 test := func(t *testing.T, cluster bool) { 837 // This test requires that we set the port to known value because 838 // we want A point to B and B to A. 
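// A accepts leafnodes on 1234 and solicits B on 5678; B does the reverse, so each server connects to the other and one of them must report the loop.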
839 oa := DefaultOptions() 840 oa.ServerName = "A" 841 if !cluster { 842 oa.Cluster.Port = 0 843 oa.Cluster.Name = _EMPTY_ 844 } 845 oa.LeafNode.ReconnectInterval = 10 * time.Millisecond 846 oa.LeafNode.Port = 1234 847 ub, _ := url.Parse("nats://127.0.0.1:5678") 848 oa.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{ub}}} 849 oa.LeafNode.connDelay = 50 * time.Millisecond 850 sa := RunServer(oa) 851 defer sa.Shutdown() 852 853 la := &loopDetectedLogger{ch: make(chan string, 1)} 854 sa.SetLogger(la, false, false) 855 856 ob := DefaultOptions() 857 ob.ServerName = "B" 858 if !cluster { 859 ob.Cluster.Port = 0 860 ob.Cluster.Name = _EMPTY_ 861 } else { 862 ob.Cluster.Name = "xyz" 863 } 864 ob.LeafNode.ReconnectInterval = 10 * time.Millisecond 865 ob.LeafNode.Port = 5678 866 ua, _ := url.Parse("nats://127.0.0.1:1234") 867 ob.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{ua}}} 868 ob.LeafNode.connDelay = 50 * time.Millisecond 869 sb := RunServer(ob) 870 defer sb.Shutdown() 871 872 lb := &loopDetectedLogger{ch: make(chan string, 1)} 873 sb.SetLogger(lb, false, false) 874 875 select { 876 case <-la.ch: 877 // OK! 878 case <-lb.ch: 879 // OK! 880 case <-time.After(5 * time.Second): 881 t.Fatalf("Did not get any error regarding loop") 882 } 883 884 sb.Shutdown() 885 ob.Port = -1 886 ob.Cluster.Port = -1 887 ob.LeafNode.Remotes = nil 888 sb = RunServer(ob) 889 defer sb.Shutdown() 890 891 checkLeafNodeConnected(t, sa) 892 } 893 t.Run("standalone", func(t *testing.T) { test(t, false) }) 894 t.Run("cluster", func(t *testing.T) { test(t, true) }) 895 } 896 897 func TestLeafNodeLoopFromDAG(t *testing.T) { 898 // We want B & C to point to A, A itself does not point to any other server. 899 // We need to cancel clustering since now this will suppress on its own. 900 oa := DefaultOptions() 901 oa.ServerName = "A" 902 oa.LeafNode.connDelay = 50 * time.Millisecond 903 oa.LeafNode.ReconnectInterval = 10 * time.Millisecond 904 oa.LeafNode.Port = -1 905 oa.Cluster = ClusterOpts{} 906 sa := RunServer(oa) 907 defer sa.Shutdown() 908 909 ua, _ := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", oa.LeafNode.Port)) 910 911 // B will point to A 912 ob := DefaultOptions() 913 ob.ServerName = "B" 914 ob.LeafNode.connDelay = 50 * time.Millisecond 915 ob.LeafNode.ReconnectInterval = 10 * time.Millisecond 916 ob.LeafNode.Port = -1 917 ob.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{ua}}} 918 ob.Cluster = ClusterOpts{} 919 sb := RunServer(ob) 920 defer sb.Shutdown() 921 922 ub, _ := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", ob.LeafNode.Port)) 923 924 checkLeafNodeConnected(t, sa) 925 checkLeafNodeConnected(t, sb) 926 927 // C will point to A and B 928 oc := DefaultOptions() 929 oc.ServerName = "C" 930 oc.LeafNode.connDelay = 50 * time.Millisecond 931 oc.LeafNode.ReconnectInterval = 10 * time.Millisecond 932 oc.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{ua}}, {URLs: []*url.URL{ub}}} 933 oc.LeafNode.connDelay = 100 * time.Millisecond // Allow logger to be attached before connecting. 934 oc.Cluster = ClusterOpts{} 935 sc := RunServer(oc) 936 937 lc := &loopDetectedLogger{ch: make(chan string, 1)} 938 sc.SetLogger(lc, false, false) 939 940 // We should get an error. 941 select { 942 case <-lc.ch: 943 // OK 944 case <-time.After(2 * time.Second): 945 t.Fatalf("Did not get any error regarding loop") 946 } 947 948 // C should not be connected to anything. 949 checkLeafNodeConnectedCount(t, sc, 0) 950 // A and B are connected to each other. 
951 checkLeafNodeConnectedCount(t, sa, 1) 952 checkLeafNodeConnectedCount(t, sb, 1) 953 954 // Shutdown C and restart without the loop. 955 sc.Shutdown() 956 oc.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{ub}}} 957 958 sc = RunServer(oc) 959 defer sc.Shutdown() 960 961 checkLeafNodeConnectedCount(t, sa, 1) 962 checkLeafNodeConnectedCount(t, sb, 2) 963 checkLeafNodeConnectedCount(t, sc, 1) 964 } 965 966 func TestLeafNodeCloseTLSConnection(t *testing.T) { 967 opts := DefaultOptions() 968 opts.DisableShortFirstPing = true 969 opts.LeafNode.Host = "127.0.0.1" 970 opts.LeafNode.Port = -1 971 opts.LeafNode.TLSTimeout = 100 972 tc := &TLSConfigOpts{ 973 CertFile: "./configs/certs/server.pem", 974 KeyFile: "./configs/certs/key.pem", 975 Insecure: true, 976 } 977 tlsConf, err := GenTLSConfig(tc) 978 if err != nil { 979 t.Fatalf("Error generating tls config: %v", err) 980 } 981 opts.LeafNode.TLSConfig = tlsConf 982 opts.NoLog = true 983 opts.NoSigs = true 984 s := RunServer(opts) 985 defer s.Shutdown() 986 987 endpoint := fmt.Sprintf("%s:%d", opts.LeafNode.Host, opts.LeafNode.Port) 988 conn, err := net.DialTimeout("tcp", endpoint, 2*time.Second) 989 if err != nil { 990 t.Fatalf("Unexpected error on dial: %v", err) 991 } 992 defer conn.Close() 993 994 br := bufio.NewReaderSize(conn, 100) 995 if _, err := br.ReadString('\n'); err != nil { 996 t.Fatalf("Unexpected error reading INFO: %v", err) 997 } 998 999 tlsConn := tls.Client(conn, &tls.Config{InsecureSkipVerify: true}) 1000 defer tlsConn.Close() 1001 if err := tlsConn.Handshake(); err != nil { 1002 t.Fatalf("Unexpected error during handshake: %v", err) 1003 } 1004 connectOp := []byte("CONNECT {\"name\":\"leaf\",\"verbose\":false,\"pedantic\":false}\r\n") 1005 if _, err := tlsConn.Write(connectOp); err != nil { 1006 t.Fatalf("Unexpected error writing CONNECT: %v", err) 1007 } 1008 if _, err := tlsConn.Write([]byte("PING\r\n")); err != nil { 1009 t.Fatalf("Unexpected error writing PING: %v", err) 1010 } 1011 1012 checkLeafNodeConnected(t, s) 1013 1014 // Get leaf connection 1015 var leaf *client 1016 s.mu.Lock() 1017 for _, l := range s.leafs { 1018 leaf = l 1019 break 1020 } 1021 s.mu.Unlock() 1022 // Fill the buffer. We want to timeout on write so that nc.Close() 1023 // would block due to a write that cannot complete. 1024 buf := make([]byte, 64*1024) 1025 done := false 1026 for !done { 1027 leaf.nc.SetWriteDeadline(time.Now().Add(time.Second)) 1028 if _, err := leaf.nc.Write(buf); err != nil { 1029 done = true 1030 } 1031 leaf.nc.SetWriteDeadline(time.Time{}) 1032 } 1033 ch := make(chan bool) 1034 go func() { 1035 select { 1036 case <-ch: 1037 return 1038 case <-time.After(3 * time.Second): 1039 fmt.Println("!!!! 
closeConnection is blocked, test will hang !!!") 1040 return 1041 } 1042 }() 1043 // Close the route 1044 leaf.closeConnection(SlowConsumerWriteDeadline) 1045 ch <- true 1046 } 1047 1048 func TestLeafNodeTLSSaveName(t *testing.T) { 1049 opts := DefaultOptions() 1050 opts.LeafNode.Host = "127.0.0.1" 1051 opts.LeafNode.Port = -1 1052 tc := &TLSConfigOpts{ 1053 CertFile: "../test/configs/certs/server-noip.pem", 1054 KeyFile: "../test/configs/certs/server-key-noip.pem", 1055 Insecure: true, 1056 } 1057 tlsConf, err := GenTLSConfig(tc) 1058 if err != nil { 1059 t.Fatalf("Error generating tls config: %v", err) 1060 } 1061 opts.LeafNode.TLSConfig = tlsConf 1062 s := RunServer(opts) 1063 defer s.Shutdown() 1064 1065 lo := DefaultOptions() 1066 u, _ := url.Parse(fmt.Sprintf("nats://localhost:%d", opts.LeafNode.Port)) 1067 lo.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{u}}} 1068 lo.LeafNode.ReconnectInterval = 15 * time.Millisecond 1069 ln := RunServer(lo) 1070 defer ln.Shutdown() 1071 1072 // We know connection will fail, but it should not fail because of error such as: 1073 // "cannot validate certificate for 127.0.0.1 because it doesn't contain any IP SANs" 1074 // This would mean that we are not saving the hostname to use during the TLS handshake. 1075 1076 le := &captureErrorLogger{errCh: make(chan string, 100)} 1077 ln.SetLogger(le, false, false) 1078 1079 tm := time.NewTimer(time.Second) 1080 var done bool 1081 for !done { 1082 select { 1083 case err := <-le.errCh: 1084 if strings.Contains(err, "doesn't contain any IP SANs") { 1085 t.Fatalf("Got this error: %q", err) 1086 } 1087 case <-tm.C: 1088 done = true 1089 } 1090 } 1091 } 1092 1093 func TestLeafNodeRemoteWrongPort(t *testing.T) { 1094 for _, test1 := range []struct { 1095 name string 1096 clusterAdvertise bool 1097 leafnodeAdvertise bool 1098 }{ 1099 {"advertise_on", false, false}, 1100 {"cluster_no_advertise", true, false}, 1101 {"leafnode_no_advertise", false, true}, 1102 } { 1103 t.Run(test1.name, func(t *testing.T) { 1104 oa := DefaultOptions() 1105 // Make sure we have all ports (client, route, gateway) and we will try 1106 // to create a leafnode to connection to each and make sure we get the error. 
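// Each sub-test below points a leafnode remote at the client, cluster, or gateway port of server A and expects ErrConnectedToWrongPort to be reported.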
1107 oa.Cluster.NoAdvertise = test1.clusterAdvertise 1108 oa.Cluster.Name = "A" 1109 oa.Cluster.Host = "127.0.0.1" 1110 oa.Cluster.Port = -1 1111 oa.Gateway.Host = "127.0.0.1" 1112 oa.Gateway.Port = -1 1113 oa.Gateway.Name = "A" 1114 oa.LeafNode.Host = "127.0.0.1" 1115 oa.LeafNode.Port = -1 1116 oa.LeafNode.NoAdvertise = test1.leafnodeAdvertise 1117 oa.Accounts = []*Account{NewAccount("sys")} 1118 oa.SystemAccount = "sys" 1119 sa := RunServer(oa) 1120 defer sa.Shutdown() 1121 1122 ob := DefaultOptions() 1123 ob.Cluster.NoAdvertise = test1.clusterAdvertise 1124 ob.Cluster.Name = "A" 1125 ob.Cluster.Host = "127.0.0.1" 1126 ob.Cluster.Port = -1 1127 ob.Routes = RoutesFromStr(fmt.Sprintf("nats://%s:%d", oa.Cluster.Host, oa.Cluster.Port)) 1128 ob.Gateway.Host = "127.0.0.1" 1129 ob.Gateway.Port = -1 1130 ob.Gateway.Name = "A" 1131 ob.LeafNode.Host = "127.0.0.1" 1132 ob.LeafNode.Port = -1 1133 ob.LeafNode.NoAdvertise = test1.leafnodeAdvertise 1134 ob.Accounts = []*Account{NewAccount("sys")} 1135 ob.SystemAccount = "sys" 1136 sb := RunServer(ob) 1137 defer sb.Shutdown() 1138 1139 checkClusterFormed(t, sa, sb) 1140 1141 for _, test := range []struct { 1142 name string 1143 port int 1144 }{ 1145 {"client", oa.Port}, 1146 {"cluster", oa.Cluster.Port}, 1147 {"gateway", oa.Gateway.Port}, 1148 } { 1149 t.Run(test.name, func(t *testing.T) { 1150 oc := DefaultOptions() 1151 // Server with the wrong config against non leafnode port. 1152 leafURL, _ := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", test.port)) 1153 oc.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{leafURL}}} 1154 oc.LeafNode.ReconnectInterval = 5 * time.Millisecond 1155 sc := RunServer(oc) 1156 defer sc.Shutdown() 1157 l := &captureErrorLogger{errCh: make(chan string, 10)} 1158 sc.SetLogger(l, true, true) 1159 1160 select { 1161 case e := <-l.errCh: 1162 if strings.Contains(e, ErrConnectedToWrongPort.Error()) { 1163 return 1164 } 1165 case <-time.After(2 * time.Second): 1166 t.Fatalf("Did not get any error about connecting to wrong port for %q - %q", 1167 test1.name, test.name) 1168 } 1169 }) 1170 } 1171 }) 1172 } 1173 } 1174 1175 func TestLeafNodeRemoteIsHub(t *testing.T) { 1176 oa := testDefaultOptionsForGateway("A") 1177 oa.Accounts = []*Account{NewAccount("sys")} 1178 oa.SystemAccount = "sys" 1179 sa := RunServer(oa) 1180 defer sa.Shutdown() 1181 1182 lno := DefaultOptions() 1183 lno.LeafNode.Host = "127.0.0.1" 1184 lno.LeafNode.Port = -1 1185 ln := RunServer(lno) 1186 defer ln.Shutdown() 1187 1188 ob1 := testGatewayOptionsFromToWithServers(t, "B", "A", sa) 1189 ob1.Accounts = []*Account{NewAccount("sys")} 1190 ob1.SystemAccount = "sys" 1191 ob1.Cluster.Host = "127.0.0.1" 1192 ob1.Cluster.Port = -1 1193 ob1.LeafNode.Host = "127.0.0.1" 1194 ob1.LeafNode.Port = -1 1195 u, _ := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", lno.LeafNode.Port)) 1196 ob1.LeafNode.Remotes = []*RemoteLeafOpts{ 1197 { 1198 URLs: []*url.URL{u}, 1199 Hub: true, 1200 }, 1201 } 1202 sb1 := RunServer(ob1) 1203 defer sb1.Shutdown() 1204 1205 waitForOutboundGateways(t, sb1, 1, 2*time.Second) 1206 waitForInboundGateways(t, sb1, 1, 2*time.Second) 1207 waitForOutboundGateways(t, sa, 1, 2*time.Second) 1208 waitForInboundGateways(t, sa, 1, 2*time.Second) 1209 1210 checkLeafNodeConnected(t, sb1) 1211 1212 // For now, due to issue 977, let's restart the leafnode so that the 1213 // leafnode connect is propagated in the super-cluster. 
1214 ln.Shutdown() 1215 ln = RunServer(lno) 1216 defer ln.Shutdown() 1217 checkLeafNodeConnected(t, sb1) 1218 1219 // Connect another server in cluster B 1220 ob2 := testGatewayOptionsFromToWithServers(t, "B", "A", sa) 1221 ob2.Accounts = []*Account{NewAccount("sys")} 1222 ob2.SystemAccount = "sys" 1223 ob2.Cluster.Host = "127.0.0.1" 1224 ob2.Cluster.Port = -1 1225 ob2.Routes = RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", ob1.Cluster.Port)) 1226 sb2 := RunServer(ob2) 1227 defer sb2.Shutdown() 1228 1229 checkClusterFormed(t, sb1, sb2) 1230 waitForOutboundGateways(t, sb2, 1, 2*time.Second) 1231 1232 expectedSubs := ln.NumSubscriptions() + 2 1233 1234 // Create sub on "foo" connected to sa 1235 ncA := natsConnect(t, sa.ClientURL()) 1236 defer ncA.Close() 1237 subFoo := natsSubSync(t, ncA, "foo") 1238 1239 // Create sub on "bar" connected to sb2 1240 ncB2 := natsConnect(t, sb2.ClientURL()) 1241 defer ncB2.Close() 1242 subBar := natsSubSync(t, ncB2, "bar") 1243 1244 // Make sure subscriptions have propagated to the leafnode. 1245 checkFor(t, time.Second, 10*time.Millisecond, func() error { 1246 if subs := ln.NumSubscriptions(); subs < expectedSubs { 1247 return fmt.Errorf("Number of subs is %d", subs) 1248 } 1249 return nil 1250 }) 1251 1252 // Create pub connection on leafnode 1253 ncLN := natsConnect(t, ln.ClientURL()) 1254 defer ncLN.Close() 1255 1256 // Publish on foo and make sure it is received. 1257 natsPub(t, ncLN, "foo", []byte("msg")) 1258 natsNexMsg(t, subFoo, time.Second) 1259 1260 // Publish on foo and make sure it is received. 1261 natsPub(t, ncLN, "bar", []byte("msg")) 1262 natsNexMsg(t, subBar, time.Second) 1263 } 1264 1265 func TestLeafNodePermissions(t *testing.T) { 1266 lo1 := DefaultOptions() 1267 lo1.LeafNode.Host = "127.0.0.1" 1268 lo1.LeafNode.Port = -1 1269 ln1 := RunServer(lo1) 1270 defer ln1.Shutdown() 1271 1272 errLog := &captureErrorLogger{errCh: make(chan string, 1)} 1273 ln1.SetLogger(errLog, false, false) 1274 1275 u, _ := url.Parse(fmt.Sprintf("nats://%s:%d", lo1.LeafNode.Host, lo1.LeafNode.Port)) 1276 lo2 := DefaultOptions() 1277 lo2.Cluster.Name = "xyz" 1278 lo2.LeafNode.ReconnectInterval = 5 * time.Millisecond 1279 lo2.LeafNode.connDelay = 100 * time.Millisecond 1280 lo2.LeafNode.Remotes = []*RemoteLeafOpts{ 1281 { 1282 URLs: []*url.URL{u}, 1283 DenyExports: []string{"export.*", "export"}, 1284 DenyImports: []string{"import.*", "import"}, 1285 }, 1286 } 1287 ln2 := RunServer(lo2) 1288 defer ln2.Shutdown() 1289 1290 checkLeafNodeConnected(t, ln1) 1291 1292 // Create clients on ln1 and ln2 1293 nc1, err := nats.Connect(ln1.ClientURL()) 1294 if err != nil { 1295 t.Fatalf("Error creating client: %v", err) 1296 } 1297 defer nc1.Close() 1298 nc2, err := nats.Connect(ln2.ClientURL()) 1299 if err != nil { 1300 t.Fatalf("Error creating client: %v", err) 1301 } 1302 defer nc2.Close() 1303 1304 checkSubs := func(acc *Account, expected int) { 1305 t.Helper() 1306 checkFor(t, time.Second, 15*time.Millisecond, func() error { 1307 if n := acc.TotalSubs(); n != expected { 1308 return fmt.Errorf("Expected %d subs, got %v", expected, n) 1309 } 1310 return nil 1311 }) 1312 } 1313 1314 // Create a sub on ">" on LN1 1315 subAll := natsSubSync(t, nc1, ">") 1316 // this should be registered in LN2 (there is 1 sub for LN1 $LDS subject) + SYS IMPORTS 1317 checkSubs(ln2.globalAccount(), 12) 1318 1319 // Check deny export clause from messages published from LN2 1320 for _, test := range []struct { 1321 name string 1322 subject string 1323 received bool 1324 }{ 1325 {"do not send on 
export.bat", "export.bat", false},
		{"do not send on export", "export", false},
		{"send on foo", "foo", true},
		{"send on export.this.one", "export.this.one", true},
	} {
		t.Run(test.name, func(t *testing.T) {
			nc2.Publish(test.subject, []byte("msg"))
			if test.received {
				natsNexMsg(t, subAll, time.Second)
			} else {
				if _, err := subAll.NextMsg(50 * time.Millisecond); err == nil {
					t.Fatalf("Should not have received message on %q", test.subject)
				}
			}
		})
	}

	subAll.Unsubscribe()
	// Goes down by 1.
	checkSubs(ln2.globalAccount(), 11)

	// We used to make sure we would not do subscriptions; however, that
	// was incorrect. We need to check publishes, not the subscriptions.
	// For instance, if we can publish across a leafnode to foo, and the
	// other side has a subscription for '*', the message should cross
	// the leafnode. The old way would not allow this.

	// Now check deny import clause.
	// As of now, we don't suppress forwarding to LN1 of subscriptions on LN2
	// that match the deny import clause. However, messages
	// should still not be able to come back to LN2.
	for _, test := range []struct {
		name       string
		subSubject string
		pubSubject string
		ok         bool
	}{
		{"reject import on import.*", "import.*", "import.bad", false},
		{"reject import on import", "import", "import", false},
		{"accepts import on foo", "foo", "foo", true},
		{"accepts import on import.this.one.ok", "import.*.>", "import.this.one.ok", true},
	} {
		t.Run(test.name, func(t *testing.T) {
			sub := natsSubSync(t, nc2, test.subSubject)
			checkSubs(ln2.globalAccount(), 12)

			if !test.ok {
				nc1.Publish(test.pubSubject, []byte("msg"))
				if _, err := sub.NextMsg(50 * time.Millisecond); err == nil {
					t.Fatalf("Did not expect to get the message")
				}
			} else {
				checkSubs(ln1.globalAccount(), 11)
				nc1.Publish(test.pubSubject, []byte("msg"))
				natsNexMsg(t, sub, time.Second)
			}
			sub.Unsubscribe()
			checkSubs(ln1.globalAccount(), 10)
		})
	}
}

func TestLeafNodePermissionsConcurrentAccess(t *testing.T) {
	lo1 := DefaultOptions()
	lo1.LeafNode.Host = "127.0.0.1"
	lo1.LeafNode.Port = -1
	ln1 := RunServer(lo1)
	defer ln1.Shutdown()

	nc1 := natsConnect(t, ln1.ClientURL())
	defer nc1.Close()

	natsSub(t, nc1, "_INBOX.>", func(_ *nats.Msg) {})
	natsFlush(t, nc1)

	ch := make(chan struct{}, 1)
	wg := sync.WaitGroup{}
	wg.Add(2)

	publish := func(nc *nats.Conn) {
		defer wg.Done()

		for {
			select {
			case <-ch:
				return
			default:
				nc.Publish(nats.NewInbox(), []byte("hello"))
			}
		}
	}

	go publish(nc1)

	u, _ := url.Parse(fmt.Sprintf("nats://%s:%d", lo1.LeafNode.Host, lo1.LeafNode.Port))
	lo2 := DefaultOptions()
	lo2.Cluster.Name = "xyz"
	lo2.LeafNode.ReconnectInterval = 5 * time.Millisecond
	lo2.LeafNode.connDelay = 500 * time.Millisecond
	lo2.LeafNode.Remotes = []*RemoteLeafOpts{
		{
			URLs:        []*url.URL{u},
			DenyExports: []string{"foo"},
			DenyImports: []string{"bar"},
		},
	}
	ln2 := RunServer(lo2)
	defer ln2.Shutdown()

	nc2 := natsConnect(t, ln2.ClientURL())
	defer nc2.Close()

	natsSub(t, nc2, "_INBOX.>", func(_ *nats.Msg) {})
	natsFlush(t, nc2)

	go publish(nc2)

checkLeafNodeConnected(t, ln1) 1443 checkLeafNodeConnected(t, ln2) 1444 1445 time.Sleep(50 * time.Millisecond) 1446 close(ch) 1447 wg.Wait() 1448 } 1449 1450 func TestLeafNodePubAllowedPruning(t *testing.T) { 1451 c := &client{} 1452 c.setPermissions(&Permissions{Publish: &SubjectPermission{Allow: []string{"foo"}}}) 1453 1454 gr := 100 1455 wg := sync.WaitGroup{} 1456 wg.Add(gr) 1457 for i := 0; i < gr; i++ { 1458 go func() { 1459 defer wg.Done() 1460 for i := 0; i < 100; i++ { 1461 c.pubAllowed(nats.NewInbox()) 1462 } 1463 }() 1464 } 1465 1466 wg.Wait() 1467 if n := int(atomic.LoadInt32(&c.perms.pcsz)); n > maxPermCacheSize { 1468 t.Fatalf("Expected size to be less than %v, got %v", maxPermCacheSize, n) 1469 } 1470 if n := atomic.LoadInt32(&c.perms.prun); n != 0 { 1471 t.Fatalf("c.perms.prun should be 0, was %v", n) 1472 } 1473 } 1474 1475 func TestLeafNodeExportPermissionsNotForSpecialSubs(t *testing.T) { 1476 lo1 := DefaultOptions() 1477 lo1.Accounts = []*Account{NewAccount("SYS")} 1478 lo1.SystemAccount = "SYS" 1479 lo1.Cluster.Name = "A" 1480 lo1.Gateway.Name = "A" 1481 lo1.Gateway.Port = -1 1482 lo1.LeafNode.Host = "127.0.0.1" 1483 lo1.LeafNode.Port = -1 1484 ln1 := RunServer(lo1) 1485 defer ln1.Shutdown() 1486 1487 u, _ := url.Parse(fmt.Sprintf("nats://%s:%d", lo1.LeafNode.Host, lo1.LeafNode.Port)) 1488 lo2 := DefaultOptions() 1489 lo2.LeafNode.Remotes = []*RemoteLeafOpts{ 1490 { 1491 URLs: []*url.URL{u}, 1492 DenyExports: []string{">"}, 1493 }, 1494 } 1495 ln2 := RunServer(lo2) 1496 defer ln2.Shutdown() 1497 1498 checkLeafNodeConnected(t, ln1) 1499 1500 // The deny is totally restrictive, but make sure that we still accept the $LDS, $GR and _GR_ go from LN1. 1501 checkFor(t, time.Second, 15*time.Millisecond, func() error { 1502 // We should have registered the 3 subs from the accepting leafnode. 1503 if n := ln2.globalAccount().TotalSubs(); n != 9 { 1504 return fmt.Errorf("Expected %d subs, got %v", 9, n) 1505 } 1506 return nil 1507 }) 1508 } 1509 1510 // Make sure that if the node that detects the loop (and sends the error and 1511 // close the connection) is the accept side, the remote node (the one that solicits) 1512 // properly use the reconnect delay. 1513 func TestLeafNodeLoopDetectedOnAcceptSide(t *testing.T) { 1514 bo := DefaultOptions() 1515 bo.LeafNode.Host = "127.0.0.1" 1516 bo.LeafNode.Port = -1 1517 b := RunServer(bo) 1518 defer b.Shutdown() 1519 1520 l := &loopDetectedLogger{ch: make(chan string, 1)} 1521 b.SetLogger(l, false, false) 1522 1523 u, _ := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", bo.LeafNode.Port)) 1524 1525 ao := testDefaultOptionsForGateway("A") 1526 ao.Accounts = []*Account{NewAccount("SYS")} 1527 ao.SystemAccount = "SYS" 1528 ao.LeafNode.ReconnectInterval = 5 * time.Millisecond 1529 ao.LeafNode.Remotes = []*RemoteLeafOpts{ 1530 { 1531 URLs: []*url.URL{u}, 1532 Hub: true, 1533 }, 1534 } 1535 a := RunServer(ao) 1536 defer a.Shutdown() 1537 1538 co := testGatewayOptionsFromToWithServers(t, "C", "A", a) 1539 co.Accounts = []*Account{NewAccount("SYS")} 1540 co.SystemAccount = "SYS" 1541 co.LeafNode.ReconnectInterval = 5 * time.Millisecond 1542 co.LeafNode.Remotes = []*RemoteLeafOpts{ 1543 { 1544 URLs: []*url.URL{u}, 1545 Hub: true, 1546 }, 1547 } 1548 c := RunServer(co) 1549 defer c.Shutdown() 1550 1551 for i := 0; i < 2; i++ { 1552 select { 1553 case <-l.ch: 1554 // OK 1555 case <-time.After(200 * time.Millisecond): 1556 // We are likely to detect from each A and C servers, 1557 // but consider a failure if we did not receive any. 
1558 if i == 0 { 1559 t.Fatalf("Should have detected loop") 1560 } 1561 } 1562 } 1563 1564 // The reconnect attempt is set to 5ms, but the default loop delay 1565 // is 30 seconds, so we should not get any new error for that long. 1566 // Check if we are getting more errors.. 1567 select { 1568 case e := <-l.ch: 1569 t.Fatalf("Should not have gotten another error, got %q", e) 1570 case <-time.After(50 * time.Millisecond): 1571 // OK! 1572 } 1573 } 1574 1575 func TestLeafNodeHubWithGateways(t *testing.T) { 1576 ao := DefaultOptions() 1577 ao.ServerName = "A" 1578 ao.LeafNode.Host = "127.0.0.1" 1579 ao.LeafNode.Port = -1 1580 a := RunServer(ao) 1581 defer a.Shutdown() 1582 1583 ua, _ := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", ao.LeafNode.Port)) 1584 1585 bo := testDefaultOptionsForGateway("B") 1586 bo.ServerName = "B" 1587 bo.Accounts = []*Account{NewAccount("SYS")} 1588 bo.SystemAccount = "SYS" 1589 bo.LeafNode.ReconnectInterval = 5 * time.Millisecond 1590 bo.LeafNode.Remotes = []*RemoteLeafOpts{ 1591 { 1592 URLs: []*url.URL{ua}, 1593 Hub: true, 1594 }, 1595 } 1596 b := RunServer(bo) 1597 defer b.Shutdown() 1598 1599 do := DefaultOptions() 1600 do.ServerName = "D" 1601 do.LeafNode.Host = "127.0.0.1" 1602 do.LeafNode.Port = -1 1603 d := RunServer(do) 1604 defer d.Shutdown() 1605 1606 ud, _ := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", do.LeafNode.Port)) 1607 1608 co := testGatewayOptionsFromToWithServers(t, "C", "B", b) 1609 co.ServerName = "C" 1610 co.Accounts = []*Account{NewAccount("SYS")} 1611 co.SystemAccount = "SYS" 1612 co.LeafNode.ReconnectInterval = 5 * time.Millisecond 1613 co.LeafNode.Remotes = []*RemoteLeafOpts{ 1614 { 1615 URLs: []*url.URL{ud}, 1616 Hub: true, 1617 }, 1618 } 1619 c := RunServer(co) 1620 defer c.Shutdown() 1621 1622 waitForInboundGateways(t, b, 1, 2*time.Second) 1623 waitForInboundGateways(t, c, 1, 2*time.Second) 1624 checkLeafNodeConnected(t, a) 1625 checkLeafNodeConnected(t, d) 1626 1627 // Create a responder on D 1628 ncD := natsConnect(t, d.ClientURL()) 1629 defer ncD.Close() 1630 1631 ncD.Subscribe("service", func(m *nats.Msg) { 1632 m.Respond([]byte("reply")) 1633 }) 1634 ncD.Flush() 1635 1636 checkFor(t, time.Second, 15*time.Millisecond, func() error { 1637 acc := a.globalAccount() 1638 if r := acc.sl.Match("service"); r != nil && len(r.psubs) == 1 { 1639 return nil 1640 } 1641 return fmt.Errorf("subscription still not registered") 1642 }) 1643 1644 // Create requestor on A and send the request, expect a reply. 
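	// The request travels from A over its leafnode link to B, across the B->C gateway,
	// and over C's leafnode link to the responder on D; the reply takes the reverse path.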
1645 ncA := natsConnect(t, a.ClientURL()) 1646 defer ncA.Close() 1647 if msg, err := ncA.Request("service", []byte("request"), time.Second); err != nil { 1648 t.Fatalf("Failed to get reply: %v", err) 1649 } else if string(msg.Data) != "reply" { 1650 t.Fatalf("Unexpected reply: %q", msg.Data) 1651 } 1652 } 1653 1654 func TestLeafNodeTmpClients(t *testing.T) { 1655 ao := DefaultOptions() 1656 ao.LeafNode.Host = "127.0.0.1" 1657 ao.LeafNode.Port = -1 1658 a := RunServer(ao) 1659 defer a.Shutdown() 1660 1661 c, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", ao.LeafNode.Port)) 1662 if err != nil { 1663 t.Fatalf("Error connecting: %v", err) 1664 } 1665 defer c.Close() 1666 // Read info 1667 br := bufio.NewReader(c) 1668 br.ReadLine() 1669 1670 checkTmp := func(expected int) { 1671 t.Helper() 1672 checkFor(t, time.Second, 15*time.Millisecond, func() error { 1673 a.grMu.Lock() 1674 l := len(a.grTmpClients) 1675 a.grMu.Unlock() 1676 if l != expected { 1677 return fmt.Errorf("Expected tmp map to have %v entries, got %v", expected, l) 1678 } 1679 return nil 1680 }) 1681 } 1682 checkTmp(1) 1683 1684 // Close client and wait check that it is removed. 1685 c.Close() 1686 checkTmp(0) 1687 1688 // Check with normal leafnode connection that once connected, 1689 // the tmp map is also emptied. 1690 bo := DefaultOptions() 1691 bo.Cluster.Name = "xyz" 1692 bo.LeafNode.ReconnectInterval = 5 * time.Millisecond 1693 u, err := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", ao.LeafNode.Port)) 1694 if err != nil { 1695 t.Fatalf("Error creating url: %v", err) 1696 } 1697 bo.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{u}}} 1698 b := RunServer(bo) 1699 defer b.Shutdown() 1700 1701 checkLeafNodeConnected(t, b) 1702 checkTmp(0) 1703 } 1704 1705 func TestLeafNodeTLSVerifyAndMap(t *testing.T) { 1706 accName := "MyAccount" 1707 acc := NewAccount(accName) 1708 certUserName := "CN=example.com,OU=NATS.io" 1709 users := []*User{{Username: certUserName, Account: acc}} 1710 1711 for _, test := range []struct { 1712 name string 1713 leafUsers bool 1714 provideCert bool 1715 }{ 1716 {"no users override, provides cert", false, true}, 1717 {"no users override, does not provide cert", false, false}, 1718 {"users override, provides cert", true, true}, 1719 {"users override, does not provide cert", true, false}, 1720 } { 1721 t.Run(test.name, func(t *testing.T) { 1722 o := DefaultOptions() 1723 o.Accounts = []*Account{acc} 1724 o.LeafNode.Host = "127.0.0.1" 1725 o.LeafNode.Port = -1 1726 if test.leafUsers { 1727 o.LeafNode.Users = users 1728 } else { 1729 o.Users = users 1730 } 1731 tc := &TLSConfigOpts{ 1732 CertFile: "../test/configs/certs/tlsauth/server.pem", 1733 KeyFile: "../test/configs/certs/tlsauth/server-key.pem", 1734 CaFile: "../test/configs/certs/tlsauth/ca.pem", 1735 Verify: true, 1736 } 1737 tlsc, err := GenTLSConfig(tc) 1738 if err != nil { 1739 t.Fatalf("Error creating tls config: %v", err) 1740 } 1741 o.LeafNode.TLSConfig = tlsc 1742 o.LeafNode.TLSMap = true 1743 s := RunServer(o) 1744 defer s.Shutdown() 1745 1746 slo := DefaultOptions() 1747 slo.Cluster.Name = "xyz" 1748 1749 sltlsc := &tls.Config{} 1750 if test.provideCert { 1751 tc := &TLSConfigOpts{ 1752 CertFile: "../test/configs/certs/tlsauth/client.pem", 1753 KeyFile: "../test/configs/certs/tlsauth/client-key.pem", 1754 } 1755 var err error 1756 sltlsc, err = GenTLSConfig(tc) 1757 if err != nil { 1758 t.Fatalf("Error generating tls config: %v", err) 1759 } 1760 } 1761 sltlsc.InsecureSkipVerify = true 1762 u, _ := 
url.Parse(fmt.Sprintf("nats://%s:%d", o.LeafNode.Host, o.LeafNode.Port)) 1763 slo.LeafNode.Remotes = []*RemoteLeafOpts{ 1764 { 1765 TLSConfig: sltlsc, 1766 URLs: []*url.URL{u}, 1767 }, 1768 } 1769 sl := RunServer(slo) 1770 defer sl.Shutdown() 1771 1772 if !test.provideCert { 1773 // Wait a bit and make sure we are not connecting 1774 time.Sleep(100 * time.Millisecond) 1775 checkLeafNodeConnectedCount(t, s, 0) 1776 return 1777 } 1778 checkLeafNodeConnected(t, s) 1779 1780 var uname string 1781 var accname string 1782 s.mu.Lock() 1783 for _, c := range s.leafs { 1784 c.mu.Lock() 1785 uname = c.opts.Username 1786 if c.acc != nil { 1787 accname = c.acc.GetName() 1788 } 1789 c.mu.Unlock() 1790 } 1791 s.mu.Unlock() 1792 if uname != certUserName { 1793 t.Fatalf("Expected username %q, got %q", certUserName, uname) 1794 } 1795 if accname != accName { 1796 t.Fatalf("Expected account %q, got %v", accName, accname) 1797 } 1798 }) 1799 } 1800 } 1801 1802 type chanLogger struct { 1803 DummyLogger 1804 triggerChan chan string 1805 } 1806 1807 func (l *chanLogger) Warnf(format string, v ...interface{}) { 1808 l.triggerChan <- fmt.Sprintf(format, v...) 1809 } 1810 1811 func (l *chanLogger) Errorf(format string, v ...interface{}) { 1812 l.triggerChan <- fmt.Sprintf(format, v...) 1813 } 1814 1815 const ( 1816 testLeafNodeTLSVerifyAndMapSrvA = ` 1817 listen: 127.0.0.1:-1 1818 leaf { 1819 listen: "127.0.0.1:-1" 1820 tls { 1821 cert_file: "../test/configs/certs/server-cert.pem" 1822 key_file: "../test/configs/certs/server-key.pem" 1823 ca_file: "../test/configs/certs/ca.pem" 1824 timeout: 2 1825 verify_and_map: true 1826 } 1827 authorization { 1828 users [{ 1829 user: "%s" 1830 }] 1831 } 1832 } 1833 ` 1834 testLeafNodeTLSVerifyAndMapSrvB = ` 1835 listen: -1 1836 leaf { 1837 remotes [ 1838 { 1839 url: "tls://user-provided-in-url@localhost:%d" 1840 tls { 1841 cert_file: "../test/configs/certs/server-cert.pem" 1842 key_file: "../test/configs/certs/server-key.pem" 1843 ca_file: "../test/configs/certs/ca.pem" 1844 } 1845 } 1846 ] 1847 }` 1848 ) 1849 1850 func TestLeafNodeTLSVerifyAndMapCfgPass(t *testing.T) { 1851 l := &chanLogger{triggerChan: make(chan string, 100)} 1852 defer close(l.triggerChan) 1853 1854 confA := createConfFile(t, []byte(fmt.Sprintf(testLeafNodeTLSVerifyAndMapSrvA, "localhost"))) 1855 srvA, optsA := RunServerWithConfig(confA) 1856 defer srvA.Shutdown() 1857 srvA.SetLogger(l, true, true) 1858 1859 confB := createConfFile(t, []byte(fmt.Sprintf(testLeafNodeTLSVerifyAndMapSrvB, optsA.LeafNode.Port))) 1860 ob := LoadConfig(confB) 1861 ob.LeafNode.ReconnectInterval = 50 * time.Millisecond 1862 srvB := RunServer(ob) 1863 defer srvB.Shutdown() 1864 1865 // Now make sure that the leaf node connection is up and the correct account was picked 1866 checkFor(t, 10*time.Second, 10*time.Millisecond, func() error { 1867 for _, srv := range []*Server{srvA, srvB} { 1868 if nln := srv.NumLeafNodes(); nln != 1 { 1869 return fmt.Errorf("Number of leaf nodes is %d", nln) 1870 } 1871 if leafz, err := srv.Leafz(nil); err != nil { 1872 if len(leafz.Leafs) != 1 { 1873 return fmt.Errorf("Number of leaf nodes returned by LEAFZ is not one: %d", len(leafz.Leafs)) 1874 } else if leafz.Leafs[0].Account != DEFAULT_GLOBAL_ACCOUNT { 1875 return fmt.Errorf("Account used is not $G: %s", leafz.Leafs[0].Account) 1876 } 1877 } 1878 } 1879 return nil 1880 }) 1881 // Make sure that the user name in the url was ignored and a warning printed 1882 for { 1883 select { 1884 case w := <-l.triggerChan: 1885 if w == `User "user-provided-in-url" 
found in connect proto, but user required from cert` {
				return
			}
		case <-time.After(2 * time.Second):
			t.Fatal("Did not get expected warning")
		}
	}
}

func TestLeafNodeTLSVerifyAndMapCfgFail(t *testing.T) {
	l := &chanLogger{triggerChan: make(chan string, 100)}
	defer close(l.triggerChan)

	// use certificate with SAN localhost, but configure the server to not accept it;
	// instead require the user name provided in the URL, so that the cert-mapped
	// user fails authentication.
	confA := createConfFile(t, []byte(fmt.Sprintf(testLeafNodeTLSVerifyAndMapSrvA, "user-provided-in-url")))
	srvA, optsA := RunServerWithConfig(confA)
	defer srvA.Shutdown()
	srvA.SetLogger(l, true, true)

	confB := createConfFile(t, []byte(fmt.Sprintf(testLeafNodeTLSVerifyAndMapSrvB, optsA.LeafNode.Port)))
	ob := LoadConfig(confB)
	ob.LeafNode.ReconnectInterval = 50 * time.Millisecond
	srvB := RunServer(ob)
	defer srvB.Shutdown()

	// Now make sure that the leaf node connection is down
	checkFor(t, 10*time.Second, 10*time.Millisecond, func() error {
		for _, srv := range []*Server{srvA, srvB} {
			if nln := srv.NumLeafNodes(); nln != 0 {
				return fmt.Errorf("Number of leaf nodes is %d", nln)
			}
		}
		return nil
	})
	// Make sure that the connection was closed for the right reason
	for {
		select {
		case w := <-l.triggerChan:
			if strings.Contains(w, ErrAuthentication.Error()) {
				return
			}
		case <-time.After(2 * time.Second):
			t.Fatal("Did not get expected warning")
		}
	}
}

func TestLeafNodeOriginClusterInfo(t *testing.T) {
	hopts := DefaultOptions()
	hopts.ServerName = "hub"
	hopts.LeafNode.Port = -1

	hub := RunServer(hopts)
	defer hub.Shutdown()

	conf := createConfFile(t, []byte(fmt.Sprintf(`
		port: -1
		leaf {
			remotes [ { url: "nats://127.0.0.1:%d" } ]
		}
	`, hopts.LeafNode.Port)))

	opts, err := ProcessConfigFile(conf)
	if err != nil {
		t.Fatalf("Error processing config file: %v", err)
	}
	opts.NoLog, opts.NoSigs = true, true

	s := RunServer(opts)
	defer s.Shutdown()

	checkLeafNodeConnected(t, s)

	// Check the info on the leafnode client in the hub.
	grabLeaf := func() *client {
		var l *client
		hub.mu.Lock()
		for _, l = range hub.leafs {
			break
		}
		hub.mu.Unlock()
		return l
	}

	l := grabLeaf()
	if rc := l.remoteCluster(); rc != "" {
		t.Fatalf("Expected an empty remote cluster, got %q", rc)
	}

	s.Shutdown()

	// Now make our leafnode part of a cluster.
	conf = createConfFile(t, []byte(fmt.Sprintf(`
		port: -1
		leaf {
			remotes [ { url: "nats://127.0.0.1:%d" } ]
		}
		cluster {
			name: "xyz"
			listen: "127.0.0.1:-1"
		}
	`, hopts.LeafNode.Port)))

	opts, err = ProcessConfigFile(conf)
	if err != nil {
		t.Fatalf("Error processing config file: %v", err)
	}
	opts.NoLog, opts.NoSigs = true, true

	s = RunServer(opts)
	defer s.Shutdown()

	checkLeafNodeConnected(t, s)

	l = grabLeaf()
	if rc := l.remoteCluster(); rc != "xyz" {
		t.Fatalf("Expected a remote cluster name of \"xyz\", got %q", rc)
	}
	pcid := l.cid

	// Now update our cluster name, simulating the settling of dynamic
	// cluster names between competing servers.
2008 s.setClusterName("xyz") 2009 // Make sure we disconnect and reconnect. 2010 checkLeafNodeConnectedCount(t, s, 0) 2011 checkLeafNodeConnected(t, s) 2012 checkLeafNodeConnected(t, hub) 2013 2014 l = grabLeaf() 2015 if rc := l.remoteCluster(); rc != "xyz" { 2016 t.Fatalf("Expected a remote cluster name of \"xyz\", got %q", rc) 2017 } 2018 // Make sure we reconnected and have a new CID. 2019 if l.cid == pcid { 2020 t.Fatalf("Expected a different id, got the same") 2021 } 2022 } 2023 2024 type proxyAcceptDetectFailureLate struct { 2025 sync.Mutex 2026 wg sync.WaitGroup 2027 acceptPort int 2028 l net.Listener 2029 srvs []net.Conn 2030 leaf net.Conn 2031 startChan chan struct{} 2032 } 2033 2034 func (p *proxyAcceptDetectFailureLate) run(t *testing.T) int { 2035 return p.runEx(t, false) 2036 } 2037 2038 func (p *proxyAcceptDetectFailureLate) runEx(t *testing.T, needStart bool) int { 2039 l, err := natsListen("tcp", "127.0.0.1:0") 2040 if err != nil { 2041 t.Fatalf("Error on listen: %v", err) 2042 } 2043 p.Lock() 2044 var startChan chan struct{} 2045 if needStart { 2046 startChan = make(chan struct{}) 2047 p.startChan = startChan 2048 } 2049 p.l = l 2050 p.Unlock() 2051 port := l.Addr().(*net.TCPAddr).Port 2052 p.wg.Add(1) 2053 go func() { 2054 defer p.wg.Done() 2055 defer l.Close() 2056 defer func() { 2057 p.Lock() 2058 for _, c := range p.srvs { 2059 c.Close() 2060 } 2061 p.Unlock() 2062 }() 2063 if startChan != nil { 2064 <-startChan 2065 } 2066 for { 2067 c, err := l.Accept() 2068 if err != nil { 2069 return 2070 } 2071 srv, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", p.acceptPort)) 2072 if err != nil { 2073 return 2074 } 2075 p.Lock() 2076 p.leaf = c 2077 p.srvs = append(p.srvs, srv) 2078 p.Unlock() 2079 2080 transfer := func(c1, c2 net.Conn) { 2081 var buf [1024]byte 2082 for { 2083 n, err := c1.Read(buf[:]) 2084 if err != nil { 2085 return 2086 } 2087 if _, err := c2.Write(buf[:n]); err != nil { 2088 return 2089 } 2090 } 2091 } 2092 2093 go transfer(srv, c) 2094 go transfer(c, srv) 2095 } 2096 }() 2097 return port 2098 } 2099 2100 func (p *proxyAcceptDetectFailureLate) start() { 2101 p.Lock() 2102 if p.startChan != nil { 2103 close(p.startChan) 2104 p.startChan = nil 2105 } 2106 p.Unlock() 2107 } 2108 2109 func (p *proxyAcceptDetectFailureLate) close() { 2110 p.Lock() 2111 if p.startChan != nil { 2112 close(p.startChan) 2113 p.startChan = nil 2114 } 2115 p.l.Close() 2116 p.Unlock() 2117 2118 p.wg.Wait() 2119 } 2120 2121 type oldConnReplacedLogger struct { 2122 DummyLogger 2123 errCh chan string 2124 warnCh chan string 2125 } 2126 2127 func (l *oldConnReplacedLogger) Errorf(format string, v ...interface{}) { 2128 select { 2129 case l.errCh <- fmt.Sprintf(format, v...): 2130 default: 2131 } 2132 } 2133 2134 func (l *oldConnReplacedLogger) Warnf(format string, v ...interface{}) { 2135 select { 2136 case l.warnCh <- fmt.Sprintf(format, v...): 2137 default: 2138 } 2139 } 2140 2141 // This test will simulate that the accept side does not detect the connection 2142 // has been closed early enough. The soliciting side will attempt to reconnect 2143 // and we should not be getting the "loop detected" error. 
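// (Side note on proxyAcceptDetectFailureLate above: each transfer goroutine is
// essentially a hand-rolled io.Copy; a sketch of the equivalent wiring, assuming
// the io package were imported, would be
//
//	go func() { _, _ = io.Copy(c, srv) }() // server -> leaf
//	go func() { _, _ = io.Copy(srv, c) }() // leaf -> server
//
// The explicit loop simply keeps the 1KB relay buffer visible in the test.)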
2144 func TestLeafNodeLoopDetectedDueToReconnect(t *testing.T) { 2145 o := DefaultOptions() 2146 o.LeafNode.Host = "127.0.0.1" 2147 o.LeafNode.Port = -1 2148 s := RunServer(o) 2149 defer s.Shutdown() 2150 2151 l := &oldConnReplacedLogger{errCh: make(chan string, 10), warnCh: make(chan string, 10)} 2152 s.SetLogger(l, false, false) 2153 2154 p := &proxyAcceptDetectFailureLate{acceptPort: o.LeafNode.Port} 2155 defer p.close() 2156 port := p.run(t) 2157 2158 aurl, err := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", port)) 2159 if err != nil { 2160 t.Fatalf("Error parsing url: %v", err) 2161 } 2162 ol := DefaultOptions() 2163 ol.Cluster.Name = "cde" 2164 ol.LeafNode.ReconnectInterval = 50 * time.Millisecond 2165 ol.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{aurl}}} 2166 sl := RunServer(ol) 2167 defer sl.Shutdown() 2168 2169 checkLeafNodeConnected(t, s) 2170 checkLeafNodeConnected(t, sl) 2171 2172 // Cause disconnect client side... 2173 p.Lock() 2174 p.leaf.Close() 2175 p.Unlock() 2176 2177 // Make sure we did not get the loop detected error 2178 select { 2179 case e := <-l.errCh: 2180 if strings.Contains(e, "Loop detected") { 2181 t.Fatalf("Loop detected: %v", e) 2182 } 2183 case <-time.After(250 * time.Millisecond): 2184 // We are ok 2185 } 2186 2187 // Now make sure we got the warning 2188 select { 2189 case w := <-l.warnCh: 2190 if !strings.Contains(w, "Replacing connection from same server") { 2191 t.Fatalf("Unexpected warning: %v", w) 2192 } 2193 case <-time.After(time.Second): 2194 t.Fatal("Did not get expected warning") 2195 } 2196 2197 checkLeafNodeConnected(t, s) 2198 checkLeafNodeConnected(t, sl) 2199 } 2200 2201 func TestLeafNodeTwoRemotesBindToSameHubAccount(t *testing.T) { 2202 opts := DefaultOptions() 2203 opts.LeafNode.Host = "127.0.0.1" 2204 opts.LeafNode.Port = -1 2205 s := RunServer(opts) 2206 defer s.Shutdown() 2207 2208 for _, test := range []struct { 2209 name string 2210 account string 2211 fail bool 2212 }{ 2213 {"different local accounts", "b", false}, 2214 {"same local accounts", "a", true}, 2215 } { 2216 t.Run(test.name, func(t *testing.T) { 2217 conf := ` 2218 listen: 127.0.0.1:-1 2219 cluster { name: ln22, listen: 127.0.0.1:-1 } 2220 accounts { 2221 a { users [ {user: a, password: a} ]} 2222 b { users [ {user: b, password: b} ]} 2223 } 2224 leafnodes { 2225 remotes = [ 2226 { 2227 url: nats-leaf://127.0.0.1:%[1]d 2228 account: a 2229 } 2230 { 2231 url: nats-leaf://127.0.0.1:%[1]d 2232 account: %s 2233 } 2234 ] 2235 } 2236 ` 2237 lconf := createConfFile(t, []byte(fmt.Sprintf(conf, opts.LeafNode.Port, test.account))) 2238 2239 lopts, err := ProcessConfigFile(lconf) 2240 if err != nil { 2241 t.Fatalf("Error loading config file: %v", err) 2242 } 2243 lopts.NoLog = false 2244 ln, err := NewServer(lopts) 2245 if err != nil { 2246 t.Fatalf("Error creating server: %v", err) 2247 } 2248 defer ln.Shutdown() 2249 l := &captureErrorLogger{errCh: make(chan string, 10)} 2250 ln.SetLogger(l, false, false) 2251 2252 wg := sync.WaitGroup{} 2253 wg.Add(1) 2254 go func() { 2255 defer wg.Done() 2256 ln.Start() 2257 }() 2258 2259 select { 2260 case err := <-l.errCh: 2261 if test.fail && !strings.Contains(err, DuplicateRemoteLeafnodeConnection.String()) { 2262 t.Fatalf("Did not get expected duplicate connection error: %v", err) 2263 } else if !test.fail && strings.Contains(err, DuplicateRemoteLeafnodeConnection.String()) { 2264 t.Fatalf("Incorrectly detected a duplicate connection: %v", err) 2265 } 2266 case <-time.After(250 * time.Millisecond): 2267 if test.fail { 2268 
t.Fatal("Did not get expected error") 2269 } 2270 } 2271 ln.Shutdown() 2272 wg.Wait() 2273 }) 2274 } 2275 } 2276 2277 func TestLeafNodeNoDuplicateWithinCluster(t *testing.T) { 2278 // This set the cluster name to "abc" 2279 oSrv1 := DefaultOptions() 2280 oSrv1.ServerName = "srv1" 2281 oSrv1.LeafNode.Host = "127.0.0.1" 2282 oSrv1.LeafNode.Port = -1 2283 srv1 := RunServer(oSrv1) 2284 defer srv1.Shutdown() 2285 2286 u, err := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", oSrv1.LeafNode.Port)) 2287 if err != nil { 2288 t.Fatalf("Error parsing url: %v", err) 2289 } 2290 2291 oLeaf1 := DefaultOptions() 2292 oLeaf1.ServerName = "leaf1" 2293 oLeaf1.Cluster.Name = "xyz" 2294 oLeaf1.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{u}}} 2295 leaf1 := RunServer(oLeaf1) 2296 defer leaf1.Shutdown() 2297 2298 leaf1ClusterURL := fmt.Sprintf("nats://127.0.0.1:%d", oLeaf1.Cluster.Port) 2299 2300 oLeaf2 := DefaultOptions() 2301 oLeaf2.ServerName = "leaf2" 2302 oLeaf2.Cluster.Name = "xyz" 2303 oLeaf2.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{u}}} 2304 oLeaf2.Routes = RoutesFromStr(leaf1ClusterURL) 2305 leaf2 := RunServer(oLeaf2) 2306 defer leaf2.Shutdown() 2307 2308 checkClusterFormed(t, leaf1, leaf2) 2309 2310 checkLeafNodeConnectedCount(t, srv1, 2) 2311 checkLeafNodeConnected(t, leaf1) 2312 checkLeafNodeConnected(t, leaf2) 2313 2314 ncSrv1 := natsConnect(t, srv1.ClientURL()) 2315 defer ncSrv1.Close() 2316 natsQueueSub(t, ncSrv1, "foo", "queue", func(m *nats.Msg) { 2317 m.Respond([]byte("from srv1")) 2318 }) 2319 2320 ncLeaf1 := natsConnect(t, leaf1.ClientURL()) 2321 defer ncLeaf1.Close() 2322 natsQueueSub(t, ncLeaf1, "foo", "queue", func(m *nats.Msg) { 2323 m.Respond([]byte("from leaf1")) 2324 }) 2325 2326 ncLeaf2 := natsConnect(t, leaf2.ClientURL()) 2327 defer ncLeaf2.Close() 2328 2329 // Check that "foo" interest is available everywhere. 2330 // For this test, we want to make sure that the 2 queue subs are 2331 // registered on all servers, so we don't use checkSubInterest 2332 // which would simply return "true" if there is any interest on "foo". 2333 servers := []*Server{srv1, leaf1, leaf2} 2334 checkFor(t, time.Second, 15*time.Millisecond, func() error { 2335 for _, s := range servers { 2336 acc, err := s.LookupAccount(globalAccountName) 2337 if err != nil { 2338 return err 2339 } 2340 acc.mu.RLock() 2341 r := acc.sl.Match("foo") 2342 ok := len(r.qsubs) == 1 && len(r.qsubs[0]) == 2 2343 acc.mu.RUnlock() 2344 if !ok { 2345 return fmt.Errorf("interest not propagated on %q", s.Name()) 2346 } 2347 } 2348 return nil 2349 }) 2350 2351 // Send requests (from leaf2). For this test to make sure that 2352 // there is no duplicate, we want to make sure that we check for 2353 // multiple replies and that the reply subject subscription has 2354 // been propagated everywhere. 2355 sub := natsSubSync(t, ncLeaf2, "reply_subj") 2356 natsFlush(t, ncLeaf2) 2357 2358 // Here we have a single sub on "reply_subj" so using checkSubInterest is ok. 2359 checkSubInterest(t, srv1, globalAccountName, "reply_subj", time.Second) 2360 checkSubInterest(t, leaf1, globalAccountName, "reply_subj", time.Second) 2361 checkSubInterest(t, leaf2, globalAccountName, "reply_subj", time.Second) 2362 2363 for i := 0; i < 5; i++ { 2364 // Now send the request 2365 natsPubReq(t, ncLeaf2, "foo", sub.Subject, []byte("req")) 2366 // Check that we get the reply 2367 replyMsg := natsNexMsg(t, sub, time.Second) 2368 // But make sure we received only 1! 
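		// (The check below treats any second message arriving within 100ms as a
		// duplicate. A slightly stricter sketch would assert the timeout error
		// from the nats.go client explicitly:
		//
		//	if _, err := sub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout {
		//		t.Fatalf("Expected no duplicate reply, got err=%v", err)
		//	}
		// )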
2369 if otherReply, _ := sub.NextMsg(100 * time.Millisecond); otherReply != nil { 2370 t.Fatalf("Received duplicate reply, first was %q, followed by %q", 2371 replyMsg.Data, otherReply.Data) 2372 } 2373 // We also should have preferred the queue sub that is in the leaf cluster. 2374 if string(replyMsg.Data) != "from leaf1" { 2375 t.Fatalf("Expected reply from leaf1, got %q", replyMsg.Data) 2376 } 2377 } 2378 } 2379 2380 func TestLeafNodeLMsgSplit(t *testing.T) { 2381 // This set the cluster name to "abc" 2382 oSrv1 := DefaultOptions() 2383 oSrv1.LeafNode.Host = "127.0.0.1" 2384 oSrv1.LeafNode.Port = -1 2385 srv1 := RunServer(oSrv1) 2386 defer srv1.Shutdown() 2387 2388 oSrv2 := DefaultOptions() 2389 oSrv2.LeafNode.Host = "127.0.0.1" 2390 oSrv2.LeafNode.Port = -1 2391 oSrv2.Routes = RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", oSrv1.Cluster.Port)) 2392 srv2 := RunServer(oSrv2) 2393 defer srv2.Shutdown() 2394 2395 checkClusterFormed(t, srv1, srv2) 2396 2397 u1, err := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", oSrv1.LeafNode.Port)) 2398 if err != nil { 2399 t.Fatalf("Error parsing url: %v", err) 2400 } 2401 u2, err := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", oSrv2.LeafNode.Port)) 2402 if err != nil { 2403 t.Fatalf("Error parsing url: %v", err) 2404 } 2405 2406 oLeaf1 := DefaultOptions() 2407 oLeaf1.Cluster.Name = "xyz" 2408 oLeaf1.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{u1, u2}}} 2409 leaf1 := RunServer(oLeaf1) 2410 defer leaf1.Shutdown() 2411 2412 oLeaf2 := DefaultOptions() 2413 oLeaf2.Cluster.Name = "xyz" 2414 oLeaf2.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{u1, u2}}} 2415 oLeaf2.Routes = RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", oLeaf1.Cluster.Port)) 2416 leaf2 := RunServer(oLeaf2) 2417 defer leaf2.Shutdown() 2418 2419 checkClusterFormed(t, leaf1, leaf2) 2420 2421 checkLeafNodeConnected(t, leaf1) 2422 checkLeafNodeConnected(t, leaf2) 2423 2424 ncSrv2 := natsConnect(t, srv2.ClientURL()) 2425 defer ncSrv2.Close() 2426 natsQueueSub(t, ncSrv2, "foo", "queue", func(m *nats.Msg) { 2427 m.Respond([]byte("from srv2")) 2428 }) 2429 2430 // Check that "foo" interest is available everywhere. 2431 checkSubInterest(t, srv1, globalAccountName, "foo", time.Second) 2432 checkSubInterest(t, srv2, globalAccountName, "foo", time.Second) 2433 checkSubInterest(t, leaf1, globalAccountName, "foo", time.Second) 2434 checkSubInterest(t, leaf2, globalAccountName, "foo", time.Second) 2435 2436 // Not required, but have a request payload that is more than 100 bytes 2437 reqPayload := make([]byte, 150) 2438 for i := 0; i < len(reqPayload); i++ { 2439 reqPayload[i] = byte((i % 26)) + 'A' 2440 } 2441 2442 // Send repeated requests (from scratch) from leaf-2: 2443 sendReq := func() { 2444 t.Helper() 2445 2446 ncLeaf2 := natsConnect(t, leaf2.ClientURL()) 2447 defer ncLeaf2.Close() 2448 2449 if _, err := ncLeaf2.Request("foo", reqPayload, time.Second); err != nil { 2450 t.Fatalf("Did not receive reply: %v", err) 2451 } 2452 } 2453 for i := 0; i < 100; i++ { 2454 sendReq() 2455 } 2456 } 2457 2458 type parseRouteLSUnsubLogger struct { 2459 DummyLogger 2460 gotTrace chan struct{} 2461 gotErr chan error 2462 } 2463 2464 func (l *parseRouteLSUnsubLogger) Errorf(format string, v ...interface{}) { 2465 err := fmt.Errorf(format, v...) 2466 select { 2467 case l.gotErr <- err: 2468 default: 2469 } 2470 } 2471 2472 func (l *parseRouteLSUnsubLogger) Tracef(format string, v ...interface{}) { 2473 trace := fmt.Sprintf(format, v...) 
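	// (If this trace could fire more than once, the send below would normally be
	// made non-blocking, mirroring Errorf above; a sketch:
	//
	//	select {
	//	case l.gotTrace <- struct{}{}:
	//	default:
	//	}
	//
	// Here a single expected trace plus the buffered channel is sufficient.)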
2474 if strings.Contains(trace, "LS- $G foo bar") { 2475 l.gotTrace <- struct{}{} 2476 } 2477 } 2478 2479 func TestLeafNodeRouteParseLSUnsub(t *testing.T) { 2480 // This set the cluster name to "abc" 2481 oSrv1 := DefaultOptions() 2482 oSrv1.LeafNode.Host = "127.0.0.1" 2483 oSrv1.LeafNode.Port = -1 2484 srv1 := RunServer(oSrv1) 2485 defer srv1.Shutdown() 2486 2487 l := &parseRouteLSUnsubLogger{gotTrace: make(chan struct{}, 1), gotErr: make(chan error, 1)} 2488 srv1.SetLogger(l, true, true) 2489 2490 oSrv2 := DefaultOptions() 2491 oSrv2.LeafNode.Host = "127.0.0.1" 2492 oSrv2.LeafNode.Port = -1 2493 oSrv2.Routes = RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", oSrv1.Cluster.Port)) 2494 srv2 := RunServer(oSrv2) 2495 defer srv2.Shutdown() 2496 2497 checkClusterFormed(t, srv1, srv2) 2498 2499 u2, err := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", oSrv2.LeafNode.Port)) 2500 if err != nil { 2501 t.Fatalf("Error parsing url: %v", err) 2502 } 2503 2504 oLeaf2 := DefaultOptions() 2505 oLeaf2.Cluster.Name = "xyz" 2506 oLeaf2.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{u2}}} 2507 leaf2 := RunServer(oLeaf2) 2508 defer leaf2.Shutdown() 2509 2510 checkLeafNodeConnected(t, srv2) 2511 checkLeafNodeConnected(t, leaf2) 2512 2513 ncLeaf2 := natsConnect(t, leaf2.ClientURL()) 2514 defer ncLeaf2.Close() 2515 2516 sub := natsQueueSubSync(t, ncLeaf2, "foo", "bar") 2517 // The issue was with the unsubscribe of this queue subscription 2518 natsUnsub(t, sub) 2519 2520 // We should get the trace 2521 select { 2522 case <-l.gotTrace: 2523 // OK! 2524 case <-time.After(100 * time.Millisecond): 2525 t.Fatalf("Did not get LS- trace") 2526 } 2527 // And no error... 2528 select { 2529 case e := <-l.gotErr: 2530 t.Fatalf("There was an error on server 1: %q", e.Error()) 2531 case <-time.After(100 * time.Millisecond): 2532 // OK! 2533 } 2534 } 2535 2536 func TestLeafNodeOperatorBadCfg(t *testing.T) { 2537 sysAcc, err := nkeys.CreateAccount() 2538 require_NoError(t, err) 2539 sysAccPk, err := sysAcc.PublicKey() 2540 require_NoError(t, err) 2541 tmpDir := t.TempDir() 2542 2543 configTmpl := ` 2544 port: -1 2545 operator: %s 2546 system_account: %s 2547 resolver: { 2548 type: cache 2549 dir: '%s' 2550 } 2551 leafnodes: { 2552 %s 2553 } 2554 ` 2555 2556 cases := []struct { 2557 name string 2558 errorText string 2559 cfg string 2560 }{ 2561 { 2562 name: "Operator with Leafnode", 2563 errorText: "operator mode does not allow specifying users in leafnode config", 2564 cfg: ` 2565 port: -1 2566 authorization { 2567 users = [{user: "u", password: "p"}] 2568 }`, 2569 }, 2570 { 2571 name: "Operator with NKey", 2572 errorText: "operator mode and non account nkeys are incompatible", 2573 cfg: ` 2574 port: -1 2575 authorization { 2576 account: notankey 2577 }`, 2578 }, 2579 { 2580 name: "Operator remote account NKeys", 2581 errorText: "operator mode requires account nkeys in remotes. " + 2582 "Please add an `account` key to each remote in your `leafnodes` section, to assign it to an account. 
" + 2583 "Each account value should be a 56 character public key, starting with the letter 'A'", 2584 cfg: `remotes: [{url: u}]`, 2585 }, 2586 } 2587 for _, c := range cases { 2588 t.Run(c.name, func(t *testing.T) { 2589 2590 conf := createConfFile(t, []byte(fmt.Sprintf(configTmpl, ojwt, sysAccPk, tmpDir, c.cfg))) 2591 opts := LoadConfig(conf) 2592 s, err := NewServer(opts) 2593 if err == nil { 2594 s.Shutdown() 2595 t.Fatal("Expected an error") 2596 } 2597 // Since the server cannot be stopped, since it did not start, 2598 // let's manually close the account resolver to avoid leaking go routines. 2599 opts.AccountResolver.Close() 2600 if err.Error() != c.errorText { 2601 t.Fatalf("Expected error %q but got %q", c.errorText, err) 2602 } 2603 }) 2604 } 2605 } 2606 2607 func TestLeafNodeTLSConfigReload(t *testing.T) { 2608 template := ` 2609 listen: 127.0.0.1:-1 2610 leaf { 2611 listen: "127.0.0.1:-1" 2612 tls { 2613 cert_file: "../test/configs/certs/server-cert.pem" 2614 key_file: "../test/configs/certs/server-key.pem" 2615 %s 2616 timeout: 2 2617 verify: true 2618 } 2619 } 2620 ` 2621 confA := createConfFile(t, []byte(fmt.Sprintf(template, ""))) 2622 2623 srvA, optsA := RunServerWithConfig(confA) 2624 defer srvA.Shutdown() 2625 2626 lg := &captureErrorLogger{errCh: make(chan string, 10)} 2627 srvA.SetLogger(lg, false, false) 2628 2629 confB := createConfFile(t, []byte(fmt.Sprintf(` 2630 listen: -1 2631 leaf { 2632 remotes [ 2633 { 2634 url: "tls://127.0.0.1:%d" 2635 tls { 2636 cert_file: "../test/configs/certs/server-cert.pem" 2637 key_file: "../test/configs/certs/server-key.pem" 2638 ca_file: "../test/configs/certs/ca.pem" 2639 } 2640 } 2641 ] 2642 } 2643 `, optsA.LeafNode.Port))) 2644 2645 optsB, err := ProcessConfigFile(confB) 2646 if err != nil { 2647 t.Fatalf("Error processing config file: %v", err) 2648 } 2649 optsB.LeafNode.ReconnectInterval = 50 * time.Millisecond 2650 optsB.NoLog, optsB.NoSigs = true, true 2651 2652 srvB := RunServer(optsB) 2653 defer srvB.Shutdown() 2654 2655 // Wait for the error 2656 select { 2657 case err := <-lg.errCh: 2658 // Since Go 1.18, we had to regenerate certs to not have to use GODEBUG="x509sha1=1" 2659 // But on macOS, with our test CA certs, no SCTs included, it will fail 2660 // for the reason "x509: “localhost” certificate is not standards compliant" 2661 // instead of "unknown authority". 2662 if !strings.Contains(err, "unknown") && !strings.Contains(err, "compliant") { 2663 t.Fatalf("Unexpected error: %v", err) 2664 } 2665 case <-time.After(2 * time.Second): 2666 t.Fatalf("Did not get TLS error") 2667 } 2668 2669 // Add the CA to srvA 2670 reloadUpdateConfig(t, srvA, confA, fmt.Sprintf(template, `ca_file: "../test/configs/certs/ca.pem"`)) 2671 2672 // Now make sure that srvB can create a LN connection. 
2673 checkFor(t, 3*time.Second, 10*time.Millisecond, func() error { 2674 if nln := srvB.NumLeafNodes(); nln != 1 { 2675 return fmt.Errorf("Number of leaf nodes is %d", nln) 2676 } 2677 return nil 2678 }) 2679 } 2680 2681 func TestLeafNodeTLSConfigReloadForRemote(t *testing.T) { 2682 confA := createConfFile(t, []byte(` 2683 listen: 127.0.0.1:-1 2684 leaf { 2685 listen: "127.0.0.1:-1" 2686 tls { 2687 cert_file: "../test/configs/certs/server-cert.pem" 2688 key_file: "../test/configs/certs/server-key.pem" 2689 ca_file: "../test/configs/certs/ca.pem" 2690 timeout: 2 2691 verify: true 2692 } 2693 } 2694 `)) 2695 2696 srvA, optsA := RunServerWithConfig(confA) 2697 defer srvA.Shutdown() 2698 2699 lg := &captureErrorLogger{errCh: make(chan string, 10)} 2700 srvA.SetLogger(lg, false, false) 2701 2702 template := ` 2703 listen: -1 2704 leaf { 2705 remotes [ 2706 { 2707 url: "tls://127.0.0.1:%d" 2708 tls { 2709 cert_file: "../test/configs/certs/server-cert.pem" 2710 key_file: "../test/configs/certs/server-key.pem" 2711 %s 2712 } 2713 } 2714 ] 2715 } 2716 ` 2717 confB := createConfFile(t, []byte(fmt.Sprintf(template, optsA.LeafNode.Port, ""))) 2718 2719 srvB, _ := RunServerWithConfig(confB) 2720 defer srvB.Shutdown() 2721 2722 // Wait for the error 2723 select { 2724 case err := <-lg.errCh: 2725 if !strings.Contains(err, "bad certificate") { 2726 t.Fatalf("Unexpected error: %v", err) 2727 } 2728 case <-time.After(2 * time.Second): 2729 t.Fatalf("Did not get TLS error") 2730 } 2731 2732 // Add the CA to srvB 2733 reloadUpdateConfig(t, srvB, confB, fmt.Sprintf(template, optsA.LeafNode.Port, `ca_file: "../test/configs/certs/ca.pem"`)) 2734 2735 // Now make sure that srvB can create a LN connection. 2736 checkFor(t, 2*time.Second, 10*time.Millisecond, func() error { 2737 if nln := srvB.NumLeafNodes(); nln != 1 { 2738 return fmt.Errorf("Number of leaf nodes is %d", nln) 2739 } 2740 return nil 2741 }) 2742 } 2743 2744 func testDefaultLeafNodeWSOptions() *Options { 2745 o := DefaultOptions() 2746 o.Websocket.Host = "127.0.0.1" 2747 o.Websocket.Port = -1 2748 o.Websocket.NoTLS = true 2749 o.LeafNode.Host = "127.0.0.1" 2750 o.LeafNode.Port = -1 2751 return o 2752 } 2753 2754 func testDefaultRemoteLeafNodeWSOptions(t *testing.T, o *Options, tls bool) *Options { 2755 // Use some path in the URL.. we don't use that, but internally 2756 // the server will prefix the path with /leafnode so that the 2757 // WS webserver knows that it needs to create a LEAF connection. 2758 u, _ := url.Parse(fmt.Sprintf("ws://127.0.0.1:%d/some/path", o.Websocket.Port)) 2759 lo := DefaultOptions() 2760 lo.Cluster.Name = "LN" 2761 remote := &RemoteLeafOpts{URLs: []*url.URL{u}} 2762 if tls { 2763 tc := &TLSConfigOpts{ 2764 CertFile: "../test/configs/certs/server-cert.pem", 2765 KeyFile: "../test/configs/certs/server-key.pem", 2766 CaFile: "../test/configs/certs/ca.pem", 2767 } 2768 tlsConf, err := GenTLSConfig(tc) 2769 if err != nil { 2770 t.Fatalf("Error generating TLS config: %v", err) 2771 } 2772 // GenTLSConfig sets the CA in ClientCAs, but since here we act 2773 // as a client, set RootCAs... 
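	// (Equivalently, a client-side TLS config could be built directly from the CA
	// file rather than reusing ClientCAs; a hedged sketch, assuming os and
	// crypto/x509 were imported:
	//
	//	caPEM, _ := os.ReadFile("../test/configs/certs/ca.pem")
	//	pool := x509.NewCertPool()
	//	pool.AppendCertsFromPEM(caPEM)
	//	remote.TLSConfig = &tls.Config{RootCAs: pool, Certificates: tlsConf.Certificates}
	// )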
2774 tlsConf.RootCAs = tlsConf.ClientCAs 2775 remote.TLSConfig = tlsConf 2776 } 2777 lo.LeafNode.Remotes = []*RemoteLeafOpts{remote} 2778 return lo 2779 } 2780 2781 func TestLeafNodeWSMixURLs(t *testing.T) { 2782 for _, test := range []struct { 2783 name string 2784 urls []string 2785 }{ 2786 {"mix 1", []string{"nats://127.0.0.1:1234", "ws://127.0.0.1:5678", "wss://127.0.0.1:9012"}}, 2787 {"mix 2", []string{"ws://127.0.0.1:1234", "nats://127.0.0.1:5678", "wss://127.0.0.1:9012"}}, 2788 {"mix 3", []string{"wss://127.0.0.1:1234", "ws://127.0.0.1:5678", "nats://127.0.0.1:9012"}}, 2789 {"mix 4", []string{"ws://127.0.0.1:1234", "nats://127.0.0.1:9012"}}, 2790 {"mix 5", []string{"nats://127.0.0.1:1234", "ws://127.0.0.1:9012"}}, 2791 {"mix 6", []string{"wss://127.0.0.1:1234", "nats://127.0.0.1:9012"}}, 2792 {"mix 7", []string{"nats://127.0.0.1:1234", "wss://127.0.0.1:9012"}}, 2793 } { 2794 t.Run(test.name, func(t *testing.T) { 2795 o := DefaultOptions() 2796 remote := &RemoteLeafOpts{} 2797 urls := make([]*url.URL, 0, 3) 2798 for _, ustr := range test.urls { 2799 u, err := url.Parse(ustr) 2800 if err != nil { 2801 t.Fatalf("Error parsing url: %v", err) 2802 } 2803 urls = append(urls, u) 2804 } 2805 remote.URLs = urls 2806 o.LeafNode.Remotes = []*RemoteLeafOpts{remote} 2807 s, err := NewServer(o) 2808 if err == nil || !strings.Contains(err.Error(), "mix") { 2809 if s != nil { 2810 s.Shutdown() 2811 } 2812 t.Fatalf("Unexpected error: %v", err) 2813 } 2814 }) 2815 } 2816 } 2817 2818 type testConnTrackSize struct { 2819 sync.Mutex 2820 net.Conn 2821 sz int 2822 } 2823 2824 func (c *testConnTrackSize) Write(p []byte) (int, error) { 2825 c.Lock() 2826 defer c.Unlock() 2827 n, err := c.Conn.Write(p) 2828 c.sz += n 2829 return n, err 2830 } 2831 2832 func TestLeafNodeWSBasic(t *testing.T) { 2833 for _, test := range []struct { 2834 name string 2835 masking bool 2836 tls bool 2837 acceptCompression bool 2838 remoteCompression bool 2839 }{ 2840 {"masking plain no compression", true, false, false, false}, 2841 {"masking plain compression", true, false, true, true}, 2842 {"masking plain compression disagree", true, false, false, true}, 2843 {"masking plain compression disagree 2", true, false, true, false}, 2844 {"masking tls no compression", true, true, false, false}, 2845 {"masking tls compression", true, true, true, true}, 2846 {"masking tls compression disagree", true, true, false, true}, 2847 {"masking tls compression disagree 2", true, true, true, false}, 2848 {"no masking plain no compression", false, false, false, false}, 2849 {"no masking plain compression", false, false, true, true}, 2850 {"no masking plain compression disagree", false, false, false, true}, 2851 {"no masking plain compression disagree 2", false, false, true, false}, 2852 {"no masking tls no compression", false, true, false, false}, 2853 {"no masking tls compression", false, true, true, true}, 2854 {"no masking tls compression disagree", false, true, false, true}, 2855 {"no masking tls compression disagree 2", false, true, true, false}, 2856 } { 2857 t.Run(test.name, func(t *testing.T) { 2858 o := testDefaultLeafNodeWSOptions() 2859 o.Websocket.NoTLS = !test.tls 2860 if test.tls { 2861 tc := &TLSConfigOpts{ 2862 CertFile: "../test/configs/certs/server-cert.pem", 2863 KeyFile: "../test/configs/certs/server-key.pem", 2864 CaFile: "../test/configs/certs/ca.pem", 2865 } 2866 tlsConf, err := GenTLSConfig(tc) 2867 if err != nil { 2868 t.Fatalf("Error generating TLS config: %v", err) 2869 } 2870 o.Websocket.TLSConfig = tlsConf 2871 } 2872 
o.Websocket.Compression = test.acceptCompression 2873 s := RunServer(o) 2874 defer s.Shutdown() 2875 2876 lo := testDefaultRemoteLeafNodeWSOptions(t, o, test.tls) 2877 lo.LeafNode.Remotes[0].Websocket.Compression = test.remoteCompression 2878 lo.LeafNode.Remotes[0].Websocket.NoMasking = !test.masking 2879 ln := RunServer(lo) 2880 defer ln.Shutdown() 2881 2882 checkLeafNodeConnected(t, s) 2883 checkLeafNodeConnected(t, ln) 2884 2885 var trackSizeConn *testConnTrackSize 2886 if !test.tls { 2887 var cln *client 2888 ln.mu.Lock() 2889 for _, l := range ln.leafs { 2890 cln = l 2891 break 2892 } 2893 ln.mu.Unlock() 2894 cln.mu.Lock() 2895 trackSizeConn = &testConnTrackSize{Conn: cln.nc} 2896 cln.nc = trackSizeConn 2897 cln.mu.Unlock() 2898 } 2899 2900 nc1 := natsConnect(t, s.ClientURL()) 2901 defer nc1.Close() 2902 sub1 := natsSubSync(t, nc1, "foo") 2903 natsFlush(t, nc1) 2904 2905 checkSubInterest(t, ln, globalAccountName, "foo", time.Second) 2906 2907 nc2 := natsConnect(t, ln.ClientURL()) 2908 defer nc2.Close() 2909 msg1Payload := make([]byte, 2048) 2910 for i := 0; i < len(msg1Payload); i++ { 2911 msg1Payload[i] = 'A' 2912 } 2913 natsPub(t, nc2, "foo", msg1Payload) 2914 2915 msg := natsNexMsg(t, sub1, time.Second) 2916 if !bytes.Equal(msg.Data, msg1Payload) { 2917 t.Fatalf("Invalid message: %q", msg.Data) 2918 } 2919 2920 sub2 := natsSubSync(t, nc2, "bar") 2921 natsFlush(t, nc2) 2922 2923 checkSubInterest(t, s, globalAccountName, "bar", time.Second) 2924 2925 msg2Payload := make([]byte, 2048) 2926 for i := 0; i < len(msg2Payload); i++ { 2927 msg2Payload[i] = 'B' 2928 } 2929 natsPub(t, nc1, "bar", msg2Payload) 2930 2931 msg = natsNexMsg(t, sub2, time.Second) 2932 if !bytes.Equal(msg.Data, msg2Payload) { 2933 t.Fatalf("Invalid message: %q", msg.Data) 2934 } 2935 2936 if !test.tls { 2937 trackSizeConn.Lock() 2938 size := trackSizeConn.sz 2939 trackSizeConn.Unlock() 2940 2941 if test.acceptCompression && test.remoteCompression { 2942 if size >= 1024 { 2943 t.Fatalf("Seems that there was no compression: size=%v", size) 2944 } 2945 } else if size < 2048 { 2946 t.Fatalf("Seems compression was on while it should not: size=%v", size) 2947 } 2948 } 2949 }) 2950 } 2951 } 2952 2953 func TestLeafNodeWSRemoteCompressAndMaskingOptions(t *testing.T) { 2954 for _, test := range []struct { 2955 name string 2956 compress bool 2957 compStr string 2958 noMasking bool 2959 noMaskStr string 2960 }{ 2961 {"compression masking", true, "true", false, "false"}, 2962 {"compression no masking", true, "true", true, "true"}, 2963 {"no compression masking", false, "false", false, "false"}, 2964 {"no compression no masking", false, "false", true, "true"}, 2965 } { 2966 t.Run(test.name, func(t *testing.T) { 2967 conf := createConfFile(t, []byte(fmt.Sprintf(` 2968 port: -1 2969 leafnodes { 2970 remotes [ 2971 {url: "ws://127.0.0.1:1234", ws_compression: %s, ws_no_masking: %s} 2972 ] 2973 } 2974 `, test.compStr, test.noMaskStr))) 2975 o, err := ProcessConfigFile(conf) 2976 if err != nil { 2977 t.Fatalf("Error loading conf: %v", err) 2978 } 2979 if nr := len(o.LeafNode.Remotes); nr != 1 { 2980 t.Fatalf("Expected 1 remote, got %v", nr) 2981 } 2982 r := o.LeafNode.Remotes[0] 2983 if cur := r.Websocket.Compression; cur != test.compress { 2984 t.Fatalf("Expected compress to be %v, got %v", test.compress, cur) 2985 } 2986 if cur := r.Websocket.NoMasking; cur != test.noMasking { 2987 t.Fatalf("Expected ws_masking to be %v, got %v", test.noMasking, cur) 2988 } 2989 }) 2990 } 2991 } 2992 2993 func TestLeafNodeWSNoMaskingRejected(t 
*testing.T) { 2994 wsTestRejectNoMasking = true 2995 defer func() { wsTestRejectNoMasking = false }() 2996 2997 o := testDefaultLeafNodeWSOptions() 2998 s := RunServer(o) 2999 defer s.Shutdown() 3000 3001 lo := testDefaultRemoteLeafNodeWSOptions(t, o, false) 3002 lo.LeafNode.Remotes[0].Websocket.NoMasking = true 3003 ln := RunServer(lo) 3004 defer ln.Shutdown() 3005 3006 checkLeafNodeConnected(t, s) 3007 checkLeafNodeConnected(t, ln) 3008 3009 var cln *client 3010 ln.mu.Lock() 3011 for _, l := range ln.leafs { 3012 cln = l 3013 break 3014 } 3015 ln.mu.Unlock() 3016 3017 cln.mu.Lock() 3018 maskWrite := cln.ws.maskwrite 3019 cln.mu.Unlock() 3020 3021 if !maskWrite { 3022 t.Fatal("Leafnode remote connection should mask writes, it does not") 3023 } 3024 } 3025 3026 func TestLeafNodeWSSubPath(t *testing.T) { 3027 o := testDefaultLeafNodeWSOptions() 3028 s := RunServer(o) 3029 defer s.Shutdown() 3030 3031 lo := testDefaultRemoteLeafNodeWSOptions(t, o, false) 3032 ln := RunServer(lo) 3033 defer ln.Shutdown() 3034 3035 // Confirm that it can connect using the subpath. 3036 checkLeafNodeConnected(t, s) 3037 checkLeafNodeConnected(t, ln) 3038 3039 // Add another leafnode that tries to connect to the subpath 3040 // but intercept the attempt for the test. 3041 o2 := testDefaultLeafNodeWSOptions() 3042 lo2 := testDefaultRemoteLeafNodeWSOptions(t, o2, false) 3043 attempts := make(chan string, 2) 3044 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 3045 attempts <- r.URL.String() 3046 })) 3047 u, _ := url.Parse(fmt.Sprintf("%v/some/path", ts.URL)) 3048 u.Scheme = "ws" 3049 lo2.LeafNode.Remotes = []*RemoteLeafOpts{ 3050 { 3051 URLs: []*url.URL{u}, 3052 }, 3053 } 3054 ln2 := RunServer(lo2) 3055 defer ln2.Shutdown() 3056 3057 expected := "/some/path/leafnode" 3058 select { 3059 case got := <-attempts: 3060 if got != expected { 3061 t.Fatalf("Expected: %v, got: %v", expected, got) 3062 } 3063 case <-time.After(2 * time.Second): 3064 t.Fatal("Timed out waiting for leaf ws connect attempt") 3065 } 3066 } 3067 3068 func TestLeafNodeWSFailedConnection(t *testing.T) { 3069 o := testDefaultLeafNodeWSOptions() 3070 s := RunServer(o) 3071 defer s.Shutdown() 3072 3073 lo := testDefaultRemoteLeafNodeWSOptions(t, o, true) 3074 lo.LeafNode.ReconnectInterval = 100 * time.Millisecond 3075 ln := RunServer(lo) 3076 defer ln.Shutdown() 3077 3078 el := &captureErrorLogger{errCh: make(chan string, 100)} 3079 ln.SetLogger(el, false, false) 3080 3081 select { 3082 case err := <-el.errCh: 3083 if !strings.Contains(err, "handshake error") { 3084 t.Fatalf("Unexpected error: %v", err) 3085 } 3086 case <-time.After(time.Second): 3087 t.Fatal("No error reported!") 3088 } 3089 ln.Shutdown() 3090 s.Shutdown() 3091 3092 lst, err := natsListen("tcp", "127.0.0.1:0") 3093 if err != nil { 3094 t.Fatalf("Error starting listener: %v", err) 3095 } 3096 defer lst.Close() 3097 3098 wg := sync.WaitGroup{} 3099 wg.Add(2) 3100 3101 go func() { 3102 defer wg.Done() 3103 3104 for i := 0; i < 10; i++ { 3105 c, err := lst.Accept() 3106 if err != nil { 3107 return 3108 } 3109 time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond) 3110 if rand.Intn(2) == 1 { 3111 c.Write([]byte("something\r\n")) 3112 } 3113 c.Close() 3114 } 3115 }() 3116 3117 time.Sleep(100 * time.Millisecond) 3118 3119 port := lst.Addr().(*net.TCPAddr).Port 3120 u, _ := url.Parse(fmt.Sprintf("ws://127.0.0.1:%d", port)) 3121 lo = DefaultOptions() 3122 lo.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{u}}} 3123 
lo.LeafNode.ReconnectInterval = 10 * time.Millisecond 3124 ln, _ = NewServer(lo) 3125 el = &captureErrorLogger{errCh: make(chan string, 100)} 3126 ln.SetLogger(el, false, false) 3127 3128 go func() { 3129 ln.Start() 3130 wg.Done() 3131 }() 3132 3133 timeout := time.NewTimer(time.Second) 3134 for i := 0; i < 10; i++ { 3135 select { 3136 case err := <-el.errCh: 3137 if !strings.Contains(err, "Error soliciting") { 3138 t.Fatalf("Unexpected error: %v", err) 3139 } 3140 case <-timeout.C: 3141 t.Fatal("No error reported!") 3142 } 3143 } 3144 ln.Shutdown() 3145 lst.Close() 3146 wg.Wait() 3147 } 3148 3149 func TestLeafNodeWSAuth(t *testing.T) { 3150 template := ` 3151 port: -1 3152 authorization { 3153 users [ 3154 {user: "user", pass: "puser", connection_types: ["%s"]} 3155 {user: "leaf", pass: "pleaf", connection_types: ["%s"%s]} 3156 ] 3157 } 3158 websocket { 3159 port: -1 3160 no_tls: true 3161 } 3162 leafnodes { 3163 port: -1 3164 } 3165 ` 3166 s, o, conf := runReloadServerWithContent(t, 3167 []byte(fmt.Sprintf(template, jwt.ConnectionTypeStandard, jwt.ConnectionTypeLeafnode, ""))) 3168 defer s.Shutdown() 3169 3170 l := &captureErrorLogger{errCh: make(chan string, 10)} 3171 s.SetLogger(l, false, false) 3172 3173 lo := testDefaultRemoteLeafNodeWSOptions(t, o, false) 3174 u, _ := url.Parse(fmt.Sprintf("ws://leaf:pleaf@127.0.0.1:%d", o.Websocket.Port)) 3175 remote := &RemoteLeafOpts{URLs: []*url.URL{u}} 3176 lo.LeafNode.Remotes = []*RemoteLeafOpts{remote} 3177 lo.LeafNode.ReconnectInterval = 50 * time.Millisecond 3178 ln := RunServer(lo) 3179 defer ln.Shutdown() 3180 3181 var lasterr string 3182 tm := time.NewTimer(2 * time.Second) 3183 for done := false; !done; { 3184 select { 3185 case lasterr = <-l.errCh: 3186 if strings.Contains(lasterr, "authentication") { 3187 done = true 3188 } 3189 case <-tm.C: 3190 t.Fatalf("Expected auth error, got %v", lasterr) 3191 } 3192 } 3193 3194 ws := fmt.Sprintf(`, "%s"`, jwt.ConnectionTypeLeafnodeWS) 3195 reloadUpdateConfig(t, s, conf, fmt.Sprintf(template, 3196 jwt.ConnectionTypeStandard, jwt.ConnectionTypeLeafnode, ws)) 3197 3198 checkLeafNodeConnected(t, s) 3199 checkLeafNodeConnected(t, ln) 3200 3201 nc1 := natsConnect(t, fmt.Sprintf("nats://user:puser@127.0.0.1:%d", o.Port)) 3202 defer nc1.Close() 3203 3204 sub := natsSubSync(t, nc1, "foo") 3205 natsFlush(t, nc1) 3206 3207 checkSubInterest(t, ln, globalAccountName, "foo", time.Second) 3208 3209 nc2 := natsConnect(t, ln.ClientURL()) 3210 defer nc2.Close() 3211 3212 natsPub(t, nc2, "foo", []byte("msg1")) 3213 msg := natsNexMsg(t, sub, time.Second) 3214 3215 if md := string(msg.Data); md != "msg1" { 3216 t.Fatalf("Invalid message: %q", md) 3217 } 3218 } 3219 3220 func TestLeafNodeWSGossip(t *testing.T) { 3221 o1 := testDefaultLeafNodeWSOptions() 3222 s1 := RunServer(o1) 3223 defer s1.Shutdown() 3224 3225 // Now connect from a server that knows only about s1 3226 lo := testDefaultRemoteLeafNodeWSOptions(t, o1, false) 3227 lo.LeafNode.ReconnectInterval = 15 * time.Millisecond 3228 ln := RunServer(lo) 3229 defer ln.Shutdown() 3230 3231 checkLeafNodeConnected(t, s1) 3232 checkLeafNodeConnected(t, ln) 3233 3234 // Now add a routed server to s1 3235 o2 := testDefaultLeafNodeWSOptions() 3236 o2.Routes = RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", o1.Cluster.Port)) 3237 s2 := RunServer(o2) 3238 defer s2.Shutdown() 3239 3240 // Wait for cluster to form 3241 checkClusterFormed(t, s1, s2) 3242 3243 // Now shutdown s1 and check that ln is able to reconnect to s2. 
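	// (The reconnect below relies on s1 having gossiped s2's leafnode URL, here a
	// websocket one, to ln over the leafnode INFO protocol. As a sketch, the
	// discovered URLs merged into the remote config could be inspected the same
	// way TestLeafNodeRandomRemotes does, purely for illustration:
	//
	//	ln.mu.Lock()
	//	cfg := ln.leafRemoteCfgs[0]
	//	ln.mu.Unlock()
	//	cfg.RLock()
	//	urls := append([]*url.URL(nil), cfg.urls...)
	//	cfg.RUnlock()
	//	// inspect urls here
	// )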
3244 s1.Shutdown() 3245 3246 checkLeafNodeConnected(t, s2) 3247 checkLeafNodeConnected(t, ln) 3248 3249 // Make sure that the reconnection was as a WS connection, not simply to 3250 // the regular LN port. 3251 var s2lc *client 3252 s2.mu.Lock() 3253 for _, l := range s2.leafs { 3254 s2lc = l 3255 break 3256 } 3257 s2.mu.Unlock() 3258 3259 s2lc.mu.Lock() 3260 isWS := s2lc.isWebsocket() 3261 s2lc.mu.Unlock() 3262 3263 if !isWS { 3264 t.Fatal("Leafnode connection is not websocket!") 3265 } 3266 } 3267 3268 // This test was showing an issue if one set maxBufSize to very small value, 3269 // such as maxBufSize = 10. With such small value, we would get a corruption 3270 // in that LMSG would arrive with missing bytes. We are now always making 3271 // a copy when dealing with messages that are bigger than maxBufSize. 3272 func TestLeafNodeWSNoBufferCorruption(t *testing.T) { 3273 o := testDefaultLeafNodeWSOptions() 3274 s := RunServer(o) 3275 defer s.Shutdown() 3276 3277 lo1 := testDefaultRemoteLeafNodeWSOptions(t, o, false) 3278 lo1.LeafNode.ReconnectInterval = 15 * time.Millisecond 3279 ln1 := RunServer(lo1) 3280 defer ln1.Shutdown() 3281 3282 lo2 := DefaultOptions() 3283 lo2.Cluster.Name = "LN" 3284 lo2.Routes = RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", lo1.Cluster.Port)) 3285 ln2 := RunServer(lo2) 3286 defer ln2.Shutdown() 3287 3288 checkClusterFormed(t, ln1, ln2) 3289 3290 checkLeafNodeConnected(t, s) 3291 checkLeafNodeConnected(t, ln1) 3292 3293 nc := natsConnect(t, s.ClientURL()) 3294 defer nc.Close() 3295 sub := natsSubSync(t, nc, "foo") 3296 3297 nc1 := natsConnect(t, ln1.ClientURL()) 3298 defer nc1.Close() 3299 3300 nc2 := natsConnect(t, ln2.ClientURL()) 3301 defer nc2.Close() 3302 sub2 := natsSubSync(t, nc2, "foo") 3303 3304 checkSubInterest(t, s, globalAccountName, "foo", time.Second) 3305 checkSubInterest(t, ln2, globalAccountName, "foo", time.Second) 3306 checkSubInterest(t, ln1, globalAccountName, "foo", time.Second) 3307 3308 payload := make([]byte, 100*1024) 3309 for i := 0; i < len(payload); i++ { 3310 payload[i] = 'A' 3311 } 3312 natsPub(t, nc1, "foo", payload) 3313 3314 checkMsgRcv := func(sub *nats.Subscription) { 3315 msg := natsNexMsg(t, sub, time.Second) 3316 if !bytes.Equal(msg.Data, payload) { 3317 t.Fatalf("Invalid message content: %q", msg.Data) 3318 } 3319 } 3320 checkMsgRcv(sub2) 3321 checkMsgRcv(sub) 3322 } 3323 3324 func TestLeafNodeWSRemoteNoTLSBlockWithWSSProto(t *testing.T) { 3325 o := testDefaultLeafNodeWSOptions() 3326 o.Websocket.NoTLS = false 3327 tc := &TLSConfigOpts{ 3328 CertFile: "../test/configs/certs/server-cert.pem", 3329 KeyFile: "../test/configs/certs/server-key.pem", 3330 CaFile: "../test/configs/certs/ca.pem", 3331 } 3332 tlsConf, err := GenTLSConfig(tc) 3333 if err != nil { 3334 t.Fatalf("Error generating TLS config: %v", err) 3335 } 3336 o.Websocket.TLSConfig = tlsConf 3337 s := RunServer(o) 3338 defer s.Shutdown() 3339 3340 // The test will make sure that if the protocol is "wss://", a TLS handshake must 3341 // be initiated, regardless of the presence of a TLS config block in config file 3342 // or here directly. 3343 // A bug was causing the absence of TLS config block to initiate a non TLS connection 3344 // even if "wss://" proto was specified, which would lead to "invalid websocket connection" 3345 // errors in the log. 
3346 // With the fix, the connection will fail because the remote will fail to verify 3347 // the root CA, but at least, we will make sure that this is not an "invalid websocket connection" 3348 3349 u, _ := url.Parse(fmt.Sprintf("wss://127.0.0.1:%d/some/path", o.Websocket.Port)) 3350 lo := DefaultOptions() 3351 lo.Cluster.Name = "LN" 3352 remote := &RemoteLeafOpts{URLs: []*url.URL{u}} 3353 lo.LeafNode.Remotes = []*RemoteLeafOpts{remote} 3354 lo.LeafNode.ReconnectInterval = 100 * time.Millisecond 3355 ln := RunServer(lo) 3356 defer ln.Shutdown() 3357 3358 l := &captureErrorLogger{errCh: make(chan string, 10)} 3359 ln.SetLogger(l, false, false) 3360 3361 select { 3362 case e := <-l.errCh: 3363 if strings.Contains(e, "invalid websocket connection") { 3364 t.Fatalf("The remote did not try to create a TLS connection: %v", e) 3365 } 3366 // OK! 3367 return 3368 case <-time.After(2 * time.Second): 3369 t.Fatal("Connection should fail") 3370 } 3371 } 3372 3373 func TestLeafNodeWSNoAuthUser(t *testing.T) { 3374 conf := createConfFile(t, []byte(` 3375 port: -1 3376 accounts { 3377 A { users [ {user: a, password: a} ]} 3378 B { users [ {user: b, password: b} ]} 3379 } 3380 websocket { 3381 port: -1 3382 no_tls: true 3383 no_auth_user: a 3384 } 3385 leafnodes { 3386 port: -1 3387 } 3388 `)) 3389 s, o := RunServerWithConfig(conf) 3390 defer s.Shutdown() 3391 3392 nc1 := natsConnect(t, fmt.Sprintf("nats://a:a@127.0.0.1:%d", o.Port)) 3393 defer nc1.Close() 3394 3395 lconf := createConfFile(t, []byte(fmt.Sprintf(` 3396 port: -1 3397 accounts { 3398 A { users [ {user: a, password: a} ]} 3399 B { users [ {user: b, password: b} ]} 3400 } 3401 leafnodes { 3402 remotes [ 3403 { 3404 url: "ws://127.0.0.1:%d" 3405 account: A 3406 } 3407 ] 3408 } 3409 `, o.Websocket.Port))) 3410 3411 ln, lo := RunServerWithConfig(lconf) 3412 defer ln.Shutdown() 3413 3414 checkLeafNodeConnected(t, s) 3415 checkLeafNodeConnected(t, ln) 3416 3417 nc2 := natsConnect(t, fmt.Sprintf("nats://a:a@127.0.0.1:%d", lo.Port)) 3418 defer nc2.Close() 3419 3420 sub := natsSubSync(t, nc2, "foo") 3421 natsFlush(t, nc2) 3422 3423 checkSubInterest(t, s, "A", "foo", time.Second) 3424 3425 natsPub(t, nc1, "foo", []byte("msg1")) 3426 msg := natsNexMsg(t, sub, time.Second) 3427 3428 if md := string(msg.Data); md != "msg1" { 3429 t.Fatalf("Invalid message: %q", md) 3430 } 3431 } 3432 3433 func TestLeafNodeStreamImport(t *testing.T) { 3434 o1 := DefaultOptions() 3435 o1.LeafNode.Port = -1 3436 accA := NewAccount("A") 3437 o1.Accounts = []*Account{accA} 3438 o1.Users = []*User{{Username: "a", Password: "a", Account: accA}} 3439 o1.LeafNode.Account = "A" 3440 o1.NoAuthUser = "a" 3441 s1 := RunServer(o1) 3442 defer s1.Shutdown() 3443 3444 o2 := DefaultOptions() 3445 o2.LeafNode.Port = -1 3446 o2.Cluster.Name = "xyz" 3447 3448 accB := NewAccount("B") 3449 if err := accB.AddStreamExport(">", nil); err != nil { 3450 t.Fatalf("Error adding stream export: %v", err) 3451 } 3452 3453 accC := NewAccount("C") 3454 if err := accC.AddStreamImport(accB, ">", ""); err != nil { 3455 t.Fatalf("Error adding stream import: %v", err) 3456 } 3457 3458 o2.Accounts = []*Account{accB, accC} 3459 o2.Users = []*User{{Username: "b", Password: "b", Account: accB}, {Username: "c", Password: "c", Account: accC}} 3460 o2.NoAuthUser = "b" 3461 u, err := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", o1.LeafNode.Port)) 3462 if err != nil { 3463 t.Fatalf("Error parsing url: %v", err) 3464 } 3465 o2.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{u}, LocalAccount: "C"}} 3466 s2 := 
RunServer(o2) 3467 defer s2.Shutdown() 3468 3469 nc1 := natsConnect(t, s1.ClientURL()) 3470 defer nc1.Close() 3471 3472 sub := natsSubSync(t, nc1, "a") 3473 3474 checkSubInterest(t, s2, "C", "a", time.Second) 3475 3476 nc2 := natsConnect(t, s2.ClientURL()) 3477 defer nc2.Close() 3478 3479 natsPub(t, nc2, "a", []byte("hello?")) 3480 3481 natsNexMsg(t, sub, time.Second) 3482 } 3483 3484 func TestLeafNodeRouteSubWithOrigin(t *testing.T) { 3485 lo1 := DefaultOptions() 3486 lo1.LeafNode.Host = "127.0.0.1" 3487 lo1.LeafNode.Port = -1 3488 lo1.Cluster.Name = "local" 3489 lo1.Cluster.Host = "127.0.0.1" 3490 lo1.Cluster.Port = -1 3491 l1 := RunServer(lo1) 3492 defer l1.Shutdown() 3493 3494 lo2 := DefaultOptions() 3495 lo2.LeafNode.Host = "127.0.0.1" 3496 lo2.LeafNode.Port = -1 3497 lo2.Cluster.Name = "local" 3498 lo2.Cluster.Host = "127.0.0.1" 3499 lo2.Cluster.Port = -1 3500 lo2.Routes = RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", lo1.Cluster.Port)) 3501 l2 := RunServer(lo2) 3502 defer l2.Shutdown() 3503 3504 checkClusterFormed(t, l1, l2) 3505 3506 u1, _ := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", lo1.LeafNode.Port)) 3507 urls := []*url.URL{u1} 3508 3509 ro1 := DefaultOptions() 3510 ro1.Cluster.Name = "remote" 3511 ro1.Cluster.Host = "127.0.0.1" 3512 ro1.Cluster.Port = -1 3513 ro1.LeafNode.ReconnectInterval = 50 * time.Millisecond 3514 ro1.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: urls}} 3515 r1 := RunServer(ro1) 3516 defer r1.Shutdown() 3517 3518 checkLeafNodeConnected(t, r1) 3519 3520 nc := natsConnect(t, r1.ClientURL(), nats.NoReconnect()) 3521 defer nc.Close() 3522 natsSubSync(t, nc, "foo") 3523 natsQueueSubSync(t, nc, "bar", "baz") 3524 checkSubInterest(t, l2, globalAccountName, "foo", time.Second) 3525 checkSubInterest(t, l2, globalAccountName, "bar", time.Second) 3526 3527 // Now shutdown the leafnode and check that any subscription for $G on l2 are gone. 
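	// (For readers outside this package: the checkFor helper used just below polls
	// the supplied condition until the deadline and fails the test with the last
	// error; in essence it behaves like this sketch, where totalWait, sleepDur and
	// f are illustrative parameter names:
	//
	//	deadline := time.Now().Add(totalWait)
	//	var err error
	//	for time.Now().Before(deadline) {
	//		if err = f(); err == nil {
	//			return
	//		}
	//		time.Sleep(sleepDur)
	//	}
	//	t.Fatalf("%v", err)
	// )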
3528 r1.Shutdown() 3529 checkFor(t, time.Second, 15*time.Millisecond, func() error { 3530 acc := l2.GlobalAccount() 3531 if n := acc.TotalSubs(); n != 5 { 3532 return fmt.Errorf("Account %q should have 5 subs, got %v", acc.GetName(), n) 3533 } 3534 return nil 3535 }) 3536 } 3537 3538 func TestLeafNodeLoopDetectionWithMultipleClusters(t *testing.T) { 3539 lo1 := DefaultOptions() 3540 lo1.LeafNode.Host = "127.0.0.1" 3541 lo1.LeafNode.Port = -1 3542 lo1.Cluster.Name = "local" 3543 lo1.Cluster.Host = "127.0.0.1" 3544 lo1.Cluster.Port = -1 3545 l1 := RunServer(lo1) 3546 defer l1.Shutdown() 3547 3548 lo2 := DefaultOptions() 3549 lo2.LeafNode.Host = "127.0.0.1" 3550 lo2.LeafNode.Port = -1 3551 lo2.Cluster.Name = "local" 3552 lo2.Cluster.Host = "127.0.0.1" 3553 lo2.Cluster.Port = -1 3554 lo2.Routes = RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", lo1.Cluster.Port)) 3555 l2 := RunServer(lo2) 3556 defer l2.Shutdown() 3557 3558 checkClusterFormed(t, l1, l2) 3559 3560 ro1 := DefaultOptions() 3561 ro1.Cluster.Name = "remote" 3562 ro1.Cluster.Host = "127.0.0.1" 3563 ro1.Cluster.Port = -1 3564 ro1.LeafNode.ReconnectInterval = 50 * time.Millisecond 3565 ro1.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{ 3566 {Scheme: "nats", Host: fmt.Sprintf("127.0.0.1:%d", lo1.LeafNode.Port)}, 3567 {Scheme: "nats", Host: fmt.Sprintf("127.0.0.1:%d", lo2.LeafNode.Port)}, 3568 }}} 3569 r1 := RunServer(ro1) 3570 defer r1.Shutdown() 3571 3572 l := &captureErrorLogger{errCh: make(chan string, 100)} 3573 r1.SetLogger(l, false, false) 3574 3575 ro2 := DefaultOptions() 3576 ro2.Cluster.Name = "remote" 3577 ro2.Cluster.Host = "127.0.0.1" 3578 ro2.Cluster.Port = -1 3579 ro2.Routes = RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", ro1.Cluster.Port)) 3580 ro2.LeafNode.ReconnectInterval = 50 * time.Millisecond 3581 ro2.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{ 3582 {Scheme: "nats", Host: fmt.Sprintf("127.0.0.1:%d", lo1.LeafNode.Port)}, 3583 {Scheme: "nats", Host: fmt.Sprintf("127.0.0.1:%d", lo2.LeafNode.Port)}, 3584 }}} 3585 r2 := RunServer(ro2) 3586 defer r2.Shutdown() 3587 3588 checkClusterFormed(t, r1, r2) 3589 checkLeafNodeConnected(t, r1) 3590 checkLeafNodeConnected(t, r2) 3591 3592 l1.Shutdown() 3593 3594 // Now wait for r1 and r2 to reconnect, they should not have a problem of loop detection. 3595 checkLeafNodeConnected(t, r1) 3596 checkLeafNodeConnected(t, r2) 3597 3598 // Wait and make sure we don't have a loop error 3599 timeout := time.NewTimer(500 * time.Millisecond) 3600 for { 3601 select { 3602 case err := <-l.errCh: 3603 if strings.Contains(err, "Loop detected") { 3604 t.Fatal(err) 3605 } 3606 case <-timeout.C: 3607 // OK, we are done. 
3608 return 3609 } 3610 } 3611 } 3612 3613 func TestLeafNodeUnsubOnRouteDisconnect(t *testing.T) { 3614 lo1 := DefaultOptions() 3615 lo1.LeafNode.Host = "127.0.0.1" 3616 lo1.LeafNode.Port = -1 3617 lo1.Cluster.Name = "local" 3618 lo1.Cluster.Host = "127.0.0.1" 3619 lo1.Cluster.Port = -1 3620 l1 := RunServer(lo1) 3621 defer l1.Shutdown() 3622 3623 lo2 := DefaultOptions() 3624 lo2.LeafNode.Host = "127.0.0.1" 3625 lo2.LeafNode.Port = -1 3626 lo2.Cluster.Name = "local" 3627 lo2.Cluster.Host = "127.0.0.1" 3628 lo2.Cluster.Port = -1 3629 lo2.Routes = RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", lo1.Cluster.Port)) 3630 l2 := RunServer(lo2) 3631 defer l2.Shutdown() 3632 3633 checkClusterFormed(t, l1, l2) 3634 3635 u1, _ := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", lo1.LeafNode.Port)) 3636 u2, _ := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", lo2.LeafNode.Port)) 3637 urls := []*url.URL{u1, u2} 3638 3639 ro1 := DefaultOptions() 3640 // DefaultOptions sets a cluster name, so make sure they are different. 3641 // Also, we don't have r1 and r2 clustered in this test, so set port to 0. 3642 ro1.Cluster.Name = _EMPTY_ 3643 ro1.Cluster.Port = 0 3644 ro1.LeafNode.ReconnectInterval = 50 * time.Millisecond 3645 ro1.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: urls}} 3646 r1 := RunServer(ro1) 3647 defer r1.Shutdown() 3648 3649 ro2 := DefaultOptions() 3650 ro1.Cluster.Name = _EMPTY_ 3651 ro2.Cluster.Port = 0 3652 ro2.LeafNode.ReconnectInterval = 50 * time.Millisecond 3653 // Have this one point only to l2 3654 ro2.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{u2}}} 3655 r2 := RunServer(ro2) 3656 defer r2.Shutdown() 3657 3658 checkLeafNodeConnected(t, r1) 3659 checkLeafNodeConnected(t, r2) 3660 3661 // Create a subscription on r1. 3662 nc := natsConnect(t, r1.ClientURL()) 3663 defer nc.Close() 3664 sub := natsSubSync(t, nc, "foo") 3665 natsFlush(t, nc) 3666 3667 checkSubInterest(t, l2, globalAccountName, "foo", time.Second) 3668 checkSubInterest(t, r2, globalAccountName, "foo", time.Second) 3669 3670 nc2 := natsConnect(t, r2.ClientURL()) 3671 defer nc2.Close() 3672 natsPub(t, nc, "foo", []byte("msg1")) 3673 3674 // Check message received 3675 natsNexMsg(t, sub, time.Second) 3676 3677 // Now shutdown l1, l2 should update subscription interest to r2. 3678 // When r1 reconnects to l2, subscription should be updated too. 3679 l1.Shutdown() 3680 3681 // Wait a bit (so that the check of interest is not OK just because 3682 // the route would not have been yet detected as broken), and check 3683 // interest still present on r2, l2. 3684 time.Sleep(100 * time.Millisecond) 3685 checkSubInterest(t, l2, globalAccountName, "foo", time.Second) 3686 checkSubInterest(t, r2, globalAccountName, "foo", time.Second) 3687 3688 // Check again that message received ok 3689 natsPub(t, nc, "foo", []byte("msg2")) 3690 natsNexMsg(t, sub, time.Second) 3691 3692 // Now close client. Interest should disappear on r2. Due to a bug, 3693 // it was not. 3694 nc.Close() 3695 3696 checkFor(t, time.Second, 15*time.Millisecond, func() error { 3697 acc := r2.GlobalAccount() 3698 if n := acc.Interest("foo"); n != 0 { 3699 return fmt.Errorf("Still interest on subject: %v", n) 3700 } 3701 return nil 3702 }) 3703 } 3704 3705 func TestLeafNodeNoPingBeforeConnect(t *testing.T) { 3706 o := DefaultOptions() 3707 o.LeafNode.Port = -1 3708 o.LeafNode.AuthTimeout = 0.5 3709 // For this test we need to disable compression, because we do use 3710 // the ping timer instead of the auth timer before the negotiation 3711 // is complete. 
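	// (In config-file form, the equivalent setup would look roughly like the sketch
	// below; the exact option names follow the 2.10-style leafnode compression and
	// authorization blocks and are shown here only as an assumption:
	//
	//	leafnodes {
	//		port: -1
	//		authorization { timeout: 0.5 }
	//		compression: { mode: "off" }
	//	}
	// )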
3712 o.LeafNode.Compression.Mode = CompressionOff 3713 s := RunServer(o) 3714 defer s.Shutdown() 3715 3716 addr := fmt.Sprintf("127.0.0.1:%d", o.LeafNode.Port) 3717 c, err := net.Dial("tcp", addr) 3718 if err != nil { 3719 t.Fatalf("Error on dial: %v", err) 3720 } 3721 defer c.Close() 3722 3723 // Read the info 3724 br := bufio.NewReader(c) 3725 c.SetReadDeadline(time.Now().Add(time.Second)) 3726 l, _, err := br.ReadLine() 3727 if err != nil { 3728 t.Fatalf("Error on read: %v", err) 3729 } 3730 if !strings.HasPrefix(string(l), "INFO") { 3731 t.Fatalf("Wrong proto: %q", l) 3732 } 3733 3734 var leaf *client 3735 checkFor(t, time.Second, 15*time.Millisecond, func() error { 3736 s.grMu.Lock() 3737 for _, l := range s.grTmpClients { 3738 leaf = l 3739 break 3740 } 3741 s.grMu.Unlock() 3742 if leaf == nil { 3743 return fmt.Errorf("No leaf connection found") 3744 } 3745 return nil 3746 }) 3747 3748 // Make sure that ping timer is not set 3749 leaf.mu.Lock() 3750 ptmrSet := leaf.ping.tmr != nil 3751 leaf.mu.Unlock() 3752 3753 if ptmrSet { 3754 t.Fatal("Ping timer was set before CONNECT was processed") 3755 } 3756 3757 // Send CONNECT 3758 if _, err := c.Write([]byte("CONNECT {}\r\n")); err != nil { 3759 t.Fatalf("Error writing connect: %v", err) 3760 } 3761 3762 // Check that we correctly set the timer now 3763 checkFor(t, time.Second, 15*time.Millisecond, func() error { 3764 leaf.mu.Lock() 3765 ptmrSet := leaf.ping.tmr != nil 3766 leaf.mu.Unlock() 3767 if !ptmrSet { 3768 return fmt.Errorf("Timer still not set") 3769 } 3770 return nil 3771 }) 3772 3773 // Reduce the first ping.. 3774 leaf.mu.Lock() 3775 leaf.ping.tmr.Reset(15 * time.Millisecond) 3776 leaf.mu.Unlock() 3777 3778 // Now consume that PING (we may get LS+, etc..) 3779 for { 3780 c.SetReadDeadline(time.Now().Add(time.Second)) 3781 l, _, err = br.ReadLine() 3782 if err != nil { 3783 t.Fatalf("Error on read: %v", err) 3784 } 3785 if strings.HasPrefix(string(l), "PING") { 3786 checkLeafNodeConnected(t, s) 3787 return 3788 } 3789 } 3790 } 3791 3792 func TestLeafNodeNoMsgLoop(t *testing.T) { 3793 hubConf := ` 3794 listen: "127.0.0.1:-1" 3795 accounts { 3796 FOO { 3797 users [ 3798 {username: leaf, password: pass} 3799 {username: user, password: pass} 3800 ] 3801 } 3802 } 3803 cluster { 3804 name: "hub" 3805 listen: "127.0.0.1:-1" 3806 %s 3807 } 3808 leafnodes { 3809 listen: "127.0.0.1:-1" 3810 authorization { 3811 account: FOO 3812 } 3813 } 3814 ` 3815 configS1 := createConfFile(t, []byte(fmt.Sprintf(hubConf, ""))) 3816 s1, o1 := RunServerWithConfig(configS1) 3817 defer s1.Shutdown() 3818 3819 configS2S3 := createConfFile(t, []byte(fmt.Sprintf(hubConf, fmt.Sprintf(`routes: ["nats://127.0.0.1:%d"]`, o1.Cluster.Port)))) 3820 s2, o2 := RunServerWithConfig(configS2S3) 3821 defer s2.Shutdown() 3822 3823 s3, _ := RunServerWithConfig(configS2S3) 3824 defer s3.Shutdown() 3825 3826 checkClusterFormed(t, s1, s2, s3) 3827 3828 contentLN := ` 3829 listen: "127.0.0.1:%d" 3830 accounts { 3831 FOO { 3832 users [ 3833 {username: leaf, password: pass} 3834 {username: user, password: pass} 3835 ] 3836 } 3837 } 3838 leafnodes { 3839 remotes = [ 3840 { 3841 url: "nats://leaf:pass@127.0.0.1:%d" 3842 account: FOO 3843 } 3844 ] 3845 } 3846 ` 3847 lnconf := createConfFile(t, []byte(fmt.Sprintf(contentLN, -1, o1.LeafNode.Port))) 3848 sl1, slo1 := RunServerWithConfig(lnconf) 3849 defer sl1.Shutdown() 3850 3851 sl2, slo2 := RunServerWithConfig(lnconf) 3852 defer sl2.Shutdown() 3853 3854 checkLeafNodeConnected(t, sl1) 3855 checkLeafNodeConnected(t, sl2) 3856 3857 // 
Create users on each leafnode 3858 nc1, err := nats.Connect(fmt.Sprintf("nats://user:pass@127.0.0.1:%d", slo1.Port)) 3859 if err != nil { 3860 t.Fatalf("Error on connect: %v", err) 3861 } 3862 defer nc1.Close() 3863 3864 rch := make(chan struct{}, 1) 3865 nc2, err := nats.Connect( 3866 fmt.Sprintf("nats://user:pass@127.0.0.1:%d", slo2.Port), 3867 nats.ReconnectWait(50*time.Millisecond), 3868 nats.ReconnectHandler(func(_ *nats.Conn) { 3869 rch <- struct{}{} 3870 }), 3871 ) 3872 if err != nil { 3873 t.Fatalf("Error on connect: %v", err) 3874 } 3875 defer nc2.Close() 3876 3877 // Create queue subs on sl2 3878 nc2.QueueSubscribe("foo", "bar", func(_ *nats.Msg) {}) 3879 nc2.QueueSubscribe("foo", "bar", func(_ *nats.Msg) {}) 3880 nc2.Flush() 3881 3882 // Wait for interest to propagate to sl1 3883 checkSubInterest(t, sl1, "FOO", "foo", 250*time.Millisecond) 3884 3885 // Create sub on sl1 3886 ch := make(chan *nats.Msg, 10) 3887 nc1.Subscribe("foo", func(m *nats.Msg) { 3888 select { 3889 case ch <- m: 3890 default: 3891 } 3892 }) 3893 nc1.Flush() 3894 3895 checkSubInterest(t, sl2, "FOO", "foo", 250*time.Millisecond) 3896 3897 // Produce from sl1 3898 nc1.Publish("foo", []byte("msg1")) 3899 3900 // Check message is received by plain sub 3901 select { 3902 case <-ch: 3903 case <-time.After(time.Second): 3904 t.Fatalf("Did not receive message") 3905 } 3906 3907 // Restart the leaf node; this time make sure we connect to the 2nd server. 3908 sl2.Shutdown() 3909 3910 // Use a config file but this time reuse the client port and set the 2nd server for 3911 // the remote leaf node port. 3912 lnconf = createConfFile(t, []byte(fmt.Sprintf(contentLN, slo2.Port, o2.LeafNode.Port))) 3913 sl2, _ = RunServerWithConfig(lnconf) 3914 defer sl2.Shutdown() 3915 3916 checkLeafNodeConnected(t, sl2) 3917 3918 // Wait for client to reconnect 3919 select { 3920 case <-rch: 3921 case <-time.After(time.Second): 3922 t.Fatalf("Did not reconnect") 3923 } 3924 3925 // Produce new messages 3926 for i := 0; i < 10; i++ { 3927 nc1.Publish("foo", []byte(fmt.Sprintf("msg%d", 2+i))) 3928 3929 // Check sub receives 1 message 3930 select { 3931 case <-ch: 3932 case <-time.After(time.Second): 3933 t.Fatalf("Did not receive message") 3934 } 3935 // Check that there are no more... 
3936 select { 3937 case m := <-ch: 3938 t.Fatalf("Loop: received second message %s", m.Data) 3939 case <-time.After(50 * time.Millisecond): 3940 // OK 3941 } 3942 } 3943 } 3944 3945 func TestLeafNodeInterestPropagationDaisychain(t *testing.T) { 3946 aTmpl := ` 3947 port: %d 3948 leafnodes { 3949 port: %d 3950 } 3951 ` 3952 3953 confA := createConfFile(t, []byte(fmt.Sprintf(aTmpl, -1, -1))) 3954 sA, _ := RunServerWithConfig(confA) 3955 defer sA.Shutdown() 3956 3957 aPort := sA.opts.Port 3958 aLeafPort := sA.opts.LeafNode.Port 3959 3960 confB := createConfFile(t, []byte(fmt.Sprintf(` 3961 port: -1 3962 leafnodes { 3963 port: -1 3964 remotes = [{ 3965 url:"nats://127.0.0.1:%d" 3966 }] 3967 }`, aLeafPort))) 3968 sB, _ := RunServerWithConfig(confB) 3969 defer sB.Shutdown() 3970 3971 confC := createConfFile(t, []byte(fmt.Sprintf(` 3972 port: -1 3973 leafnodes { 3974 port: -1 3975 remotes = [{url:"nats://127.0.0.1:%d"}] 3976 }`, sB.opts.LeafNode.Port))) 3977 sC, _ := RunServerWithConfig(confC) 3978 defer sC.Shutdown() 3979 3980 checkLeafNodeConnectedCount(t, sC, 1) 3981 checkLeafNodeConnectedCount(t, sB, 2) 3982 checkLeafNodeConnectedCount(t, sA, 1) 3983 3984 ncC := natsConnect(t, sC.ClientURL()) 3985 defer ncC.Close() 3986 _, err := ncC.SubscribeSync("foo") 3987 require_NoError(t, err) 3988 require_NoError(t, ncC.Flush()) 3989 3990 checkSubInterest(t, sC, "$G", "foo", time.Second) 3991 checkSubInterest(t, sB, "$G", "foo", time.Second) 3992 checkSubInterest(t, sA, "$G", "foo", time.Second) 3993 3994 ncA := natsConnect(t, sA.ClientURL()) 3995 defer ncA.Close() 3996 3997 sA.Shutdown() 3998 sA.WaitForShutdown() 3999 4000 confAA := createConfFile(t, []byte(fmt.Sprintf(aTmpl, aPort, aLeafPort))) 4001 sAA, _ := RunServerWithConfig(confAA) 4002 defer sAA.Shutdown() 4003 4004 checkLeafNodeConnectedCount(t, sAA, 1) 4005 checkLeafNodeConnectedCount(t, sB, 2) 4006 checkLeafNodeConnectedCount(t, sC, 1) 4007 4008 checkSubInterest(t, sC, "$G", "foo", time.Second) 4009 checkSubInterest(t, sB, "$G", "foo", time.Second) 4010 checkSubInterest(t, sAA, "$G", "foo", time.Second) // failure issue 2448 4011 } 4012 4013 func TestLeafNodeQueueGroupWithLateLNJoin(t *testing.T) { 4014 /* 4015 4016 Topology: A cluster of leafnodes LN2 and LN3, connect 4017 to a cluster C1, C2. 4018 4019 sub(foo) sub(foo) 4020 \ / 4021 C1 <-> C2 4022 ^ ^ 4023 | | 4024 LN2 <-> LN3 4025 / \ 4026 sub(foo) sub(foo) 4027 4028 Once the above is set, start LN1 that connects to C1. 4029 4030 sub(foo) sub(foo) 4031 \ / 4032 LN1 -> C1 <-> C2 4033 ^ ^ 4034 | | 4035 LN2 <-> LN3 4036 / \ 4037 sub(foo) sub(foo) 4038 4039 Remove subs to LN3, C2 and C1. 4040 4041 LN1 -> C1 <-> C2 4042 ^ ^ 4043 | | 4044 LN2 <-> LN3 4045 / 4046 sub(foo) 4047 4048 Publish from LN1 and verify message is received by sub on LN2. 
4049 4050 pub(foo) 4051 \ 4052 LN1 -> C1 <-> C2 4053 ^ ^ 4054 | | 4055 LN2 <-> LN3 4056 / 4057 sub(foo) 4058 */ 4059 co1 := DefaultOptions() 4060 co1.LeafNode.Host = "127.0.0.1" 4061 co1.LeafNode.Port = -1 4062 co1.Cluster.Name = "ngs" 4063 co1.Cluster.Host = "127.0.0.1" 4064 co1.Cluster.Port = -1 4065 c1 := RunServer(co1) 4066 defer c1.Shutdown() 4067 4068 co2 := DefaultOptions() 4069 co2.LeafNode.Host = "127.0.0.1" 4070 co2.LeafNode.Port = -1 4071 co2.Cluster.Name = "ngs" 4072 co2.Cluster.Host = "127.0.0.1" 4073 co2.Cluster.Port = -1 4074 co2.Routes = RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", co1.Cluster.Port)) 4075 c2 := RunServer(co2) 4076 defer c2.Shutdown() 4077 4078 checkClusterFormed(t, c1, c2) 4079 4080 lo2 := DefaultOptions() 4081 lo2.Cluster.Name = "local" 4082 lo2.Cluster.Host = "127.0.0.1" 4083 lo2.Cluster.Port = -1 4084 lo2.LeafNode.ReconnectInterval = 50 * time.Millisecond 4085 lo2.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{{Scheme: "nats", Host: fmt.Sprintf("127.0.0.1:%d", co1.LeafNode.Port)}}}} 4086 ln2 := RunServer(lo2) 4087 defer ln2.Shutdown() 4088 4089 lo3 := DefaultOptions() 4090 lo3.Cluster.Name = "local" 4091 lo3.Cluster.Host = "127.0.0.1" 4092 lo3.Cluster.Port = -1 4093 lo3.Routes = RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", lo2.Cluster.Port)) 4094 lo3.LeafNode.ReconnectInterval = 50 * time.Millisecond 4095 lo3.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{{Scheme: "nats", Host: fmt.Sprintf("127.0.0.1:%d", co2.LeafNode.Port)}}}} 4096 ln3 := RunServer(lo3) 4097 defer ln3.Shutdown() 4098 4099 checkClusterFormed(t, ln2, ln3) 4100 checkLeafNodeConnected(t, ln2) 4101 checkLeafNodeConnected(t, ln3) 4102 4103 cln2 := natsConnect(t, ln2.ClientURL()) 4104 defer cln2.Close() 4105 sln2 := natsQueueSubSync(t, cln2, "foo", "qgroup") 4106 natsFlush(t, cln2) 4107 4108 cln3 := natsConnect(t, ln3.ClientURL()) 4109 defer cln3.Close() 4110 sln3 := natsQueueSubSync(t, cln3, "foo", "qgroup") 4111 natsFlush(t, cln3) 4112 4113 cc1 := natsConnect(t, c1.ClientURL()) 4114 defer cc1.Close() 4115 sc1 := natsQueueSubSync(t, cc1, "foo", "qgroup") 4116 natsFlush(t, cc1) 4117 4118 cc2 := natsConnect(t, c2.ClientURL()) 4119 defer cc2.Close() 4120 sc2 := natsQueueSubSync(t, cc2, "foo", "qgroup") 4121 natsFlush(t, cc2) 4122 4123 checkSubInterest(t, c1, globalAccountName, "foo", time.Second) 4124 checkSubInterest(t, c2, globalAccountName, "foo", time.Second) 4125 checkSubInterest(t, ln2, globalAccountName, "foo", time.Second) 4126 checkSubInterest(t, ln3, globalAccountName, "foo", time.Second) 4127 4128 lo1 := DefaultOptions() 4129 lo1.LeafNode.ReconnectInterval = 50 * time.Millisecond 4130 lo1.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{{Scheme: "nats", Host: fmt.Sprintf("127.0.0.1:%d", co1.LeafNode.Port)}}}} 4131 ln1 := RunServer(lo1) 4132 defer ln1.Shutdown() 4133 4134 checkLeafNodeConnected(t, ln1) 4135 checkSubInterest(t, ln1, globalAccountName, "foo", time.Second) 4136 4137 sln3.Unsubscribe() 4138 natsFlush(t, cln3) 4139 sc2.Unsubscribe() 4140 natsFlush(t, cc2) 4141 sc1.Unsubscribe() 4142 natsFlush(t, cc1) 4143 4144 cln1 := natsConnect(t, ln1.ClientURL()) 4145 defer cln1.Close() 4146 4147 natsPub(t, cln1, "foo", []byte("hello")) 4148 natsNexMsg(t, sln2, time.Second) 4149 } 4150 4151 func TestLeafNodeJetStreamDomainMapCrossTalk(t *testing.T) { 4152 accs := ` 4153 accounts :{ 4154 A:{ jetstream: enable, users:[ {user:a1,password:a1}]}, 4155 SYS:{ users:[ {user:s1,password:s1}]}, 4156 } 4157 system_account: SYS 4158 ` 4159 4160 sd1 := t.TempDir() 4161 
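// The cross-talk checks below hinge on the client pinning one JetStream domain
// at a time. A minimal sketch of that selection with nats.go; the domain and
// bucket names are illustrative and the helper name kvInDomain is not part of
// the server API.
func kvInDomain(nc *nats.Conn, domain, bucket string) (nats.KeyValue, error) {
	// Bind the JetStream context to a single domain, e.g. "da" on the hub
	// side or "dl" on the leaf side, so KV calls cannot leak across domains.
	js, err := nc.JetStream(nats.Domain(domain))
	if err != nil {
		return nil, err
	}
	return js.CreateKeyValue(&nats.KeyValueConfig{Bucket: bucket})
}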
confA := createConfFile(t, []byte(fmt.Sprintf(` 4162 listen: 127.0.0.1:-1 4163 %s 4164 jetstream: { domain: da, store_dir: '%s', max_mem: 50Mb, max_file: 50Mb } 4165 leafnodes: { 4166 listen: 127.0.0.1:-1 4167 no_advertise: true 4168 authorization: { 4169 timeout: 0.5 4170 } 4171 } 4172 `, accs, sd1))) 4173 sA, _ := RunServerWithConfig(confA) 4174 defer sA.Shutdown() 4175 4176 sd2 := t.TempDir() 4177 confL := createConfFile(t, []byte(fmt.Sprintf(` 4178 listen: 127.0.0.1:-1 4179 %s 4180 jetstream: { domain: dl, store_dir: '%s', max_mem: 50Mb, max_file: 50Mb } 4181 leafnodes:{ 4182 no_advertise: true 4183 remotes:[{url:nats://a1:a1@127.0.0.1:%d, account: A}, 4184 {url:nats://s1:s1@127.0.0.1:%d, account: SYS}] 4185 } 4186 `, accs, sd2, sA.opts.LeafNode.Port, sA.opts.LeafNode.Port))) 4187 sL, _ := RunServerWithConfig(confL) 4188 defer sL.Shutdown() 4189 4190 ncA := natsConnect(t, sA.ClientURL(), nats.UserInfo("a1", "a1")) 4191 defer ncA.Close() 4192 ncL := natsConnect(t, sL.ClientURL(), nats.UserInfo("a1", "a1")) 4193 defer ncL.Close() 4194 4195 test := func(jsA, jsL nats.JetStreamContext) { 4196 kvA, err := jsA.CreateKeyValue(&nats.KeyValueConfig{Bucket: "bucket"}) 4197 require_NoError(t, err) 4198 kvL, err := jsL.CreateKeyValue(&nats.KeyValueConfig{Bucket: "bucket"}) 4199 require_NoError(t, err) 4200 4201 _, err = kvA.Put("A", nil) 4202 require_NoError(t, err) 4203 _, err = kvL.Put("L", nil) 4204 require_NoError(t, err) 4205 4206 // check for unwanted cross talk 4207 _, err = kvA.Get("A") 4208 require_NoError(t, err) 4209 _, err = kvA.Get("l") 4210 require_Error(t, err) 4211 require_True(t, err == nats.ErrKeyNotFound) 4212 4213 _, err = kvL.Get("A") 4214 require_Error(t, err) 4215 require_True(t, err == nats.ErrKeyNotFound) 4216 _, err = kvL.Get("L") 4217 require_NoError(t, err) 4218 4219 err = jsA.DeleteKeyValue("bucket") 4220 require_NoError(t, err) 4221 err = jsL.DeleteKeyValue("bucket") 4222 require_NoError(t, err) 4223 } 4224 4225 jsA, err := ncA.JetStream() 4226 require_NoError(t, err) 4227 jsL, err := ncL.JetStream() 4228 require_NoError(t, err) 4229 test(jsA, jsL) 4230 4231 jsAL, err := ncA.JetStream(nats.Domain("dl")) 4232 require_NoError(t, err) 4233 jsLA, err := ncL.JetStream(nats.Domain("da")) 4234 require_NoError(t, err) 4235 test(jsAL, jsLA) 4236 4237 jsAA, err := ncA.JetStream(nats.Domain("da")) 4238 require_NoError(t, err) 4239 jsLL, err := ncL.JetStream(nats.Domain("dl")) 4240 require_NoError(t, err) 4241 test(jsAA, jsLL) 4242 } 4243 4244 type checkLeafMinVersionLogger struct { 4245 DummyLogger 4246 errCh chan string 4247 connCh chan string 4248 } 4249 4250 func (l *checkLeafMinVersionLogger) Errorf(format string, args ...interface{}) { 4251 msg := fmt.Sprintf(format, args...) 4252 if strings.Contains(msg, "minimum version") { 4253 select { 4254 case l.errCh <- msg: 4255 default: 4256 } 4257 } 4258 } 4259 4260 func (l *checkLeafMinVersionLogger) Noticef(format string, args ...interface{}) { 4261 msg := fmt.Sprintf(format, args...) 
4262 if strings.Contains(msg, "Leafnode connection created") { 4263 select { 4264 case l.connCh <- msg: 4265 default: 4266 } 4267 } 4268 } 4269 4270 func TestLeafNodeMinVersion(t *testing.T) { 4271 conf := createConfFile(t, []byte(` 4272 port: -1 4273 leafnodes { 4274 port: -1 4275 min_version: 2.8.0 4276 } 4277 `)) 4278 s, o := RunServerWithConfig(conf) 4279 defer s.Shutdown() 4280 4281 rconf := createConfFile(t, []byte(fmt.Sprintf(` 4282 port: -1 4283 leafnodes { 4284 remotes [ 4285 {url: "nats://127.0.0.1:%d" } 4286 ] 4287 } 4288 `, o.LeafNode.Port))) 4289 ln, _ := RunServerWithConfig(rconf) 4290 defer ln.Shutdown() 4291 4292 checkLeafNodeConnected(t, s) 4293 checkLeafNodeConnected(t, ln) 4294 4295 ln.Shutdown() 4296 s.Shutdown() 4297 4298 // Now makes sure we validate options, not just config file. 4299 for _, test := range []struct { 4300 name string 4301 version string 4302 err string 4303 }{ 4304 {"invalid version", "abc", "semver"}, 4305 {"version too low", "2.7.9", "the minimum version should be at least 2.8.0"}, 4306 } { 4307 t.Run(test.name, func(t *testing.T) { 4308 o.Port = -1 4309 o.LeafNode.Port = -1 4310 o.LeafNode.MinVersion = test.version 4311 if s, err := NewServer(o); err == nil || !strings.Contains(err.Error(), test.err) { 4312 if s != nil { 4313 s.Shutdown() 4314 } 4315 t.Fatalf("Expected error to contain %q, got %v", test.err, err) 4316 } 4317 }) 4318 } 4319 4320 // Ok, so now to verify that a server rejects a leafnode connection 4321 // we will set the min_version above our current VERSION. So first 4322 // decompose the version: 4323 major, minor, _, err := versionComponents(VERSION) 4324 if err != nil { 4325 t.Fatalf("The current server version %q is not valid: %v", VERSION, err) 4326 } 4327 // Let's make our minimum server an minor version above 4328 mv := fmt.Sprintf("%d.%d.0", major, minor+1) 4329 conf = createConfFile(t, []byte(fmt.Sprintf(` 4330 port: -1 4331 leafnodes { 4332 port: -1 4333 min_version: "%s" 4334 } 4335 `, mv))) 4336 s, o = RunServerWithConfig(conf) 4337 defer s.Shutdown() 4338 4339 l := &checkLeafMinVersionLogger{errCh: make(chan string, 1), connCh: make(chan string, 1)} 4340 s.SetLogger(l, false, false) 4341 4342 rconf = createConfFile(t, []byte(fmt.Sprintf(` 4343 port: -1 4344 leafnodes { 4345 remotes [ 4346 {url: "nats://127.0.0.1:%d" } 4347 ] 4348 } 4349 `, o.LeafNode.Port))) 4350 lo := LoadConfig(rconf) 4351 lo.LeafNode.ReconnectInterval = 50 * time.Millisecond 4352 ln = RunServer(lo) 4353 defer ln.Shutdown() 4354 4355 select { 4356 case <-l.connCh: 4357 case <-time.After(time.Second): 4358 t.Fatal("Remote did not try to connect") 4359 } 4360 4361 select { 4362 case <-l.errCh: 4363 case <-time.After(time.Second): 4364 t.Fatal("Did not get the minimum version required error") 4365 } 4366 4367 // Since we have a very small reconnect interval, if the connection was 4368 // closed "right away", then we should have had a reconnect attempt with 4369 // another failure. This should not be the case because the server will 4370 // wait 5s before closing the connection. 
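// The min_version gate exercised above reduces to an ordered comparison of
// version components. A sketch of the ordering it implies, not the server's
// actual check; the helper name olderThan is illustrative.
func olderThan(aMaj, aMin, aPatch, bMaj, bMin, bPatch int) bool {
	// Compare major, then minor, then patch: the usual semver precedence.
	if aMaj != bMaj {
		return aMaj < bMaj
	}
	if aMin != bMin {
		return aMin < bMin
	}
	return aPatch < bPatch
}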
4371 select { 4372 case <-l.connCh: 4373 t.Fatal("Should not have tried to reconnect") 4374 case <-time.After(250 * time.Millisecond): 4375 // OK 4376 } 4377 } 4378 4379 func TestLeafNodeStreamAndShadowSubs(t *testing.T) { 4380 hubConf := createConfFile(t, []byte(` 4381 port: -1 4382 leafnodes { 4383 port: -1 4384 authorization: { 4385 user: leaf 4386 password: leaf 4387 account: B 4388 } 4389 } 4390 accounts: { 4391 A: { 4392 users = [{user: usrA, password: usrA}] 4393 exports: [{stream: foo.*.>}] 4394 } 4395 B: { 4396 imports: [{stream: {account: A, subject: foo.*.>}}] 4397 } 4398 } 4399 `)) 4400 hub, hubo := RunServerWithConfig(hubConf) 4401 defer hub.Shutdown() 4402 4403 leafConfContet := fmt.Sprintf(` 4404 port: -1 4405 leafnodes { 4406 remotes = [ 4407 { 4408 url: "nats-leaf://leaf:leaf@127.0.0.1:%d" 4409 account: B 4410 } 4411 ] 4412 } 4413 accounts: { 4414 B: { 4415 exports: [{stream: foo.*.>}] 4416 } 4417 C: { 4418 users: [{user: usrC, password: usrC}] 4419 imports: [{stream: {account: B, subject: foo.bar.>}}] 4420 } 4421 } 4422 `, hubo.LeafNode.Port) 4423 leafConf := createConfFile(t, []byte(leafConfContet)) 4424 leafo := LoadConfig(leafConf) 4425 leafo.LeafNode.ReconnectInterval = 50 * time.Millisecond 4426 leaf := RunServer(leafo) 4427 defer leaf.Shutdown() 4428 4429 checkLeafNodeConnected(t, hub) 4430 checkLeafNodeConnected(t, leaf) 4431 4432 subPubAndCheck := func() { 4433 t.Helper() 4434 4435 ncl, err := nats.Connect(leaf.ClientURL(), nats.UserInfo("usrC", "usrC")) 4436 if err != nil { 4437 t.Fatalf("Error connecting: %v", err) 4438 } 4439 defer ncl.Close() 4440 4441 // This will send an LS+ to the "hub" server. 4442 sub, err := ncl.SubscribeSync("foo.*.baz") 4443 if err != nil { 4444 t.Fatalf("Error subscribing: %v", err) 4445 } 4446 ncl.Flush() 4447 4448 ncm, err := nats.Connect(hub.ClientURL(), nats.UserInfo("usrA", "usrA")) 4449 if err != nil { 4450 t.Fatalf("Error connecting: %v", err) 4451 } 4452 defer ncm.Close() 4453 4454 // Try a few times in case subject interest has not propagated yet 4455 for i := 0; i < 5; i++ { 4456 ncm.Publish("foo.bar.baz", []byte("msg")) 4457 if _, err := sub.NextMsg(time.Second); err == nil { 4458 // OK, done! 4459 return 4460 } 4461 } 4462 t.Fatal("Message was not received") 4463 } 4464 subPubAndCheck() 4465 4466 // Now cause a restart of the accepting side so that the leaf connection 4467 // is recreated. 4468 hub.Shutdown() 4469 hub = RunServer(hubo) 4470 defer hub.Shutdown() 4471 4472 checkLeafNodeConnected(t, hub) 4473 checkLeafNodeConnected(t, leaf) 4474 4475 subPubAndCheck() 4476 4477 // Issue a config reload even though we make no modification. There was 4478 // a defect that caused the interest propagation to break. 4479 // Set the ReconnectInterval to the default value so that reload does not complain. 4480 leaf.getOpts().LeafNode.ReconnectInterval = DEFAULT_LEAF_NODE_RECONNECT 4481 reloadUpdateConfig(t, leaf, leafConf, leafConfContet) 4482 4483 // Check again 4484 subPubAndCheck() 4485 } 4486 4487 func TestLeafNodeAuthConfigReload(t *testing.T) { 4488 template := ` 4489 listen: 127.0.0.1:-1 4490 accounts { test: {} } 4491 leaf { 4492 listen: "127.0.0.1:7422" 4493 tls { 4494 cert_file: "../test/configs/certs/server-cert.pem" 4495 key_file: "../test/configs/certs/server-key.pem" 4496 ca_file: "../test/configs/certs/ca.pem" 4497 } 4498 authorization { 4499 # These are only fields allowed atm. 
4500 users = [ { user: test, password: "s3cret1", account: "test" } ] 4501 } 4502 } 4503 ` 4504 conf := createConfFile(t, []byte(template)) 4505 4506 s, _ := RunServerWithConfig(conf) 4507 defer s.Shutdown() 4508 4509 lg := &captureErrorLogger{errCh: make(chan string, 10)} 4510 s.SetLogger(lg, false, false) 4511 4512 // Reload here should work ok. 4513 reloadUpdateConfig(t, s, conf, template) 4514 } 4515 4516 func TestLeafNodeSignatureCB(t *testing.T) { 4517 content := ` 4518 port: -1 4519 server_name: OP 4520 operator = "../test/configs/nkeys/op.jwt" 4521 resolver = MEMORY 4522 listen: "127.0.0.1:-1" 4523 leafnodes { 4524 listen: "127.0.0.1:-1" 4525 } 4526 ` 4527 conf := createConfFile(t, []byte(content)) 4528 s, opts := RunServerWithConfig(conf) 4529 defer s.Shutdown() 4530 4531 _, akp := createAccount(s) 4532 kp, _ := nkeys.CreateUser() 4533 pub, _ := kp.PublicKey() 4534 nuc := jwt.NewUserClaims(pub) 4535 ujwt, err := nuc.Encode(akp) 4536 if err != nil { 4537 t.Fatalf("Error generating user JWT: %v", err) 4538 } 4539 4540 lopts := &DefaultTestOptions 4541 u, err := url.Parse(fmt.Sprintf("nats://%s:%d", opts.LeafNode.Host, opts.LeafNode.Port)) 4542 if err != nil { 4543 t.Fatalf("Error parsing url: %v", err) 4544 } 4545 remote := &RemoteLeafOpts{URLs: []*url.URL{u}} 4546 remote.SignatureCB = func(nonce []byte) (string, []byte, error) { 4547 return "", nil, fmt.Errorf("on purpose") 4548 } 4549 lopts.LeafNode.Remotes = []*RemoteLeafOpts{remote} 4550 lopts.LeafNode.ReconnectInterval = 100 * time.Millisecond 4551 sl := RunServer(lopts) 4552 defer sl.Shutdown() 4553 4554 slog := &captureErrorLogger{errCh: make(chan string, 10)} 4555 sl.SetLogger(slog, false, false) 4556 4557 // Now check that the leafnode got the error that the callback returned. 4558 select { 4559 case err := <-slog.errCh: 4560 if !strings.Contains(err, "on purpose") { 4561 t.Fatalf("Expected error from cb, got %v", err) 4562 } 4563 case <-time.After(time.Second): 4564 t.Fatal("Did not get expected error") 4565 } 4566 4567 sl.Shutdown() 4568 // Now check what happens if the connection is closed while in the callback. 4569 blockCh := make(chan struct{}) 4570 remote.SignatureCB = func(nonce []byte) (string, []byte, error) { 4571 <-blockCh 4572 sig, err := kp.Sign(nonce) 4573 return ujwt, sig, err 4574 } 4575 sl = RunServer(lopts) 4576 defer sl.Shutdown() 4577 4578 // Recreate the logger so that we are sure not to have possible previous errors 4579 slog = &captureErrorLogger{errCh: make(chan string, 10)} 4580 sl.SetLogger(slog, false, false) 4581 4582 // Get the leaf connection from the temp clients map and close it. 4583 checkFor(t, time.Second, 15*time.Millisecond, func() error { 4584 var c *client 4585 sl.grMu.Lock() 4586 for _, cli := range sl.grTmpClients { 4587 c = cli 4588 } 4589 sl.grMu.Unlock() 4590 if c == nil { 4591 return fmt.Errorf("Client still not found in temp map") 4592 } 4593 c.closeConnection(ClientClosed) 4594 return nil 4595 }) 4596 4597 // Release the callback, and check we get the appropriate error. 
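// The contract for a remote's SignatureCB, as exercised above: given the
// server-supplied nonce, hand back the user JWT plus an nkeys signature of
// that nonce. A minimal sketch; the helper name leafSignatureCB is
// illustrative and not part of the server API.
func leafSignatureCB(kp nkeys.KeyPair, userJWT string) func(nonce []byte) (string, []byte, error) {
	return func(nonce []byte) (string, []byte, error) {
		// Sign the nonce with the user's nkey; the accepting server checks
		// the signature against the public key carried in the JWT.
		sig, err := kp.Sign(nonce)
		if err != nil {
			return "", nil, err
		}
		return userJWT, sig, nil
	}
}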
4598 close(blockCh) 4599 select { 4600 case err := <-slog.errCh: 4601 if !strings.Contains(err, ErrConnectionClosed.Error()) { 4602 t.Fatalf("Expected error that connection was closed, got %v", err) 4603 } 4604 case <-time.After(time.Second): 4605 t.Fatal("Did not get expected error") 4606 } 4607 4608 sl.Shutdown() 4609 // Change to a good CB and now it should work 4610 remote.SignatureCB = func(nonce []byte) (string, []byte, error) { 4611 sig, err := kp.Sign(nonce) 4612 return ujwt, sig, err 4613 } 4614 sl = RunServer(lopts) 4615 defer sl.Shutdown() 4616 checkLeafNodeConnected(t, sl) 4617 } 4618 4619 type testLeafTraceLogger struct { 4620 DummyLogger 4621 ch chan string 4622 } 4623 4624 func (l *testLeafTraceLogger) Tracef(format string, v ...interface{}) { 4625 msg := fmt.Sprintf(format, v...) 4626 // We will sub to 'baz' and to 'bar', so filter on 'ba' prefix. 4627 if strings.Contains(msg, "[LS+ ba") { 4628 select { 4629 case l.ch <- msg: 4630 default: 4631 } 4632 } 4633 } 4634 4635 // Make sure permissioned denied subs do not make it to the leafnode even if existing. 4636 func TestLeafNodePermsSuppressSubs(t *testing.T) { 4637 conf := createConfFile(t, []byte(` 4638 listen: 127.0.0.1:-1 4639 authorization { 4640 PERMS = { 4641 publish = "foo" 4642 subscribe = ["_INBOX.>"] 4643 } 4644 users = [ 4645 {user: "user", password: "pass"} 4646 {user: "ln", password: "pass" , permissions: $PERMS } 4647 ] 4648 } 4649 no_auth_user: user 4650 4651 leafnodes { 4652 listen: 127.0.0.1:7422 4653 } 4654 `)) 4655 4656 lconf := createConfFile(t, []byte(` 4657 listen: 127.0.0.1:-1 4658 leafnodes { 4659 remotes = [ { url: "nats://ln:pass@127.0.0.1" } ] 4660 } 4661 trace = true 4662 `)) 4663 4664 s, _ := RunServerWithConfig(conf) 4665 defer s.Shutdown() 4666 4667 // Connect client to the hub. 4668 nc, err := nats.Connect(s.ClientURL()) 4669 require_NoError(t, err) 4670 4671 // This should not be seen on leafnode side since we only allow pub to "foo" 4672 _, err = nc.SubscribeSync("baz") 4673 require_NoError(t, err) 4674 4675 ln, _ := RunServerWithConfig(lconf) 4676 defer ln.Shutdown() 4677 4678 // Setup logger to capture trace events. 4679 l := &testLeafTraceLogger{ch: make(chan string, 10)} 4680 ln.SetLogger(l, true, true) 4681 4682 checkLeafNodeConnected(t, ln) 4683 4684 // Need to have ot reconnect to trigger since logger attaches too late. 4685 ln.mu.Lock() 4686 for _, c := range ln.leafs { 4687 c.mu.Lock() 4688 c.nc.Close() 4689 c.mu.Unlock() 4690 } 4691 ln.mu.Unlock() 4692 checkLeafNodeConnectedCount(t, ln, 0) 4693 checkLeafNodeConnectedCount(t, ln, 1) 4694 4695 select { 4696 case msg := <-l.ch: 4697 t.Fatalf("Unexpected LS+ seen on leafnode: %s", msg) 4698 case <-time.After(50 * time.Millisecond): 4699 // OK 4700 } 4701 4702 // Now double check that new subs also do not propagate. 4703 // This behavior was working already. 4704 _, err = nc.SubscribeSync("bar") 4705 require_NoError(t, err) 4706 4707 select { 4708 case msg := <-l.ch: 4709 t.Fatalf("Unexpected LS+ seen on leafnode: %s", msg) 4710 case <-time.After(50 * time.Millisecond): 4711 // OK 4712 } 4713 } 4714 4715 func TestLeafNodeDuplicateMsg(t *testing.T) { 4716 // This involves 2 clusters with leafnodes to each other with a different 4717 // account, and those accounts import/export a subject that caused 4718 // duplicate messages. This test requires static ports since we need to 4719 // have A->B and B->A. 
4720 a1Conf := createConfFile(t, []byte(` 4721 cluster : { 4722 name : A 4723 port : -1 4724 } 4725 leafnodes : { 4726 port : 14333 4727 remotes : [{ 4728 account : A 4729 urls : [nats://leafa:pwd@127.0.0.1:24333] 4730 }] 4731 } 4732 port : -1 4733 server_name : A_1 4734 4735 accounts:{ 4736 A:{ 4737 users:[ 4738 {user: leafa, password: pwd}, 4739 {user: usera, password: usera, permissions: { 4740 publish:{ allow:["iot.b.topic"] } 4741 subscribe:{ allow:["iot.a.topic"] } 4742 }} 4743 ] 4744 imports:[ 4745 {stream:{account:"B", subject:"iot.a.topic"}} 4746 ] 4747 }, 4748 B:{ 4749 users:[ 4750 {user: leafb, password: pwd}, 4751 ] 4752 exports:[ 4753 {stream: "iot.a.topic", accounts: ["A"]} 4754 ] 4755 } 4756 } 4757 `)) 4758 a1, oa1 := RunServerWithConfig(a1Conf) 4759 defer a1.Shutdown() 4760 4761 a2Conf := createConfFile(t, []byte(fmt.Sprintf(` 4762 cluster : { 4763 name : A 4764 port : -1 4765 routes : [nats://127.0.0.1:%d] 4766 } 4767 leafnodes : { 4768 port : 14334 4769 remotes : [{ 4770 account : A 4771 urls : [nats://leafa:pwd@127.0.0.1:24334] 4772 }] 4773 } 4774 port : -1 4775 server_name : A_2 4776 4777 accounts:{ 4778 A:{ 4779 users:[ 4780 {user: leafa, password: pwd}, 4781 {user: usera, password: usera, permissions: { 4782 publish:{ allow:["iot.b.topic"] } 4783 subscribe:{ allow:["iot.a.topic"] } 4784 }} 4785 ] 4786 imports:[ 4787 {stream:{account:"B", subject:"iot.a.topic"}} 4788 ] 4789 }, 4790 B:{ 4791 users:[ 4792 {user: leafb, password: pwd}, 4793 ] 4794 exports:[ 4795 {stream: "iot.a.topic", accounts: ["A"]} 4796 ] 4797 } 4798 }`, oa1.Cluster.Port))) 4799 a2, _ := RunServerWithConfig(a2Conf) 4800 defer a2.Shutdown() 4801 4802 checkClusterFormed(t, a1, a2) 4803 4804 b1Conf := createConfFile(t, []byte(` 4805 cluster : { 4806 name : B 4807 port : -1 4808 } 4809 leafnodes : { 4810 port : 24333 4811 remotes : [{ 4812 account : B 4813 urls : [nats://leafb:pwd@127.0.0.1:14333] 4814 }] 4815 } 4816 port : -1 4817 server_name : B_1 4818 4819 accounts:{ 4820 A:{ 4821 users:[ 4822 {user: leafa, password: pwd}, 4823 ] 4824 exports:[ 4825 {stream: "iot.b.topic", accounts: ["B"]} 4826 ] 4827 }, 4828 B:{ 4829 users:[ 4830 {user: leafb, password: pwd}, 4831 {user: userb, password: userb, permissions: { 4832 publish:{ allow:["iot.a.topic"] }, 4833 subscribe:{ allow:["iot.b.topic"] } 4834 }} 4835 ] 4836 imports:[ 4837 {stream:{account:"A", subject:"iot.b.topic"}} 4838 ] 4839 } 4840 }`)) 4841 b1, ob1 := RunServerWithConfig(b1Conf) 4842 defer b1.Shutdown() 4843 4844 b2Conf := createConfFile(t, []byte(fmt.Sprintf(` 4845 cluster : { 4846 name : B 4847 port : -1 4848 routes : [nats://127.0.0.1:%d] 4849 } 4850 leafnodes : { 4851 port : 24334 4852 remotes : [{ 4853 account : B 4854 urls : [nats://leafb:pwd@127.0.0.1:14334] 4855 }] 4856 } 4857 port : -1 4858 server_name : B_2 4859 4860 accounts:{ 4861 A:{ 4862 users:[ 4863 {user: leafa, password: pwd}, 4864 ] 4865 exports:[ 4866 {stream: "iot.b.topic", accounts: ["B"]} 4867 ] 4868 }, 4869 B:{ 4870 users:[ 4871 {user: leafb, password: pwd}, 4872 {user: userb, password: userb, permissions: { 4873 publish:{ allow:["iot.a.topic"] }, 4874 subscribe:{ allow:["iot.b.topic"] } 4875 }} 4876 ] 4877 imports:[ 4878 {stream:{account:"A", subject:"iot.b.topic"}} 4879 ] 4880 } 4881 }`, ob1.Cluster.Port))) 4882 b2, _ := RunServerWithConfig(b2Conf) 4883 defer b2.Shutdown() 4884 4885 checkClusterFormed(t, b1, b2) 4886 4887 checkLeafNodeConnectedCount(t, a1, 2) 4888 checkLeafNodeConnectedCount(t, a2, 2) 4889 checkLeafNodeConnectedCount(t, b1, 2) 4890 
checkLeafNodeConnectedCount(t, b2, 2) 4891 4892 check := func(t *testing.T, subSrv *Server, pubSrv *Server) { 4893 4894 sc := natsConnect(t, subSrv.ClientURL(), nats.UserInfo("userb", "userb")) 4895 defer sc.Close() 4896 4897 subject := "iot.b.topic" 4898 sub := natsSubSync(t, sc, subject) 4899 4900 // Wait for this to be available in A cluster 4901 checkSubInterest(t, a1, "A", subject, time.Second) 4902 checkSubInterest(t, a2, "A", subject, time.Second) 4903 4904 pb := natsConnect(t, pubSrv.ClientURL(), nats.UserInfo("usera", "usera")) 4905 defer pb.Close() 4906 4907 natsPub(t, pb, subject, []byte("msg")) 4908 natsNexMsg(t, sub, time.Second) 4909 // Should be only 1 4910 if msg, err := sub.NextMsg(100 * time.Millisecond); err == nil { 4911 t.Fatalf("Received duplicate on %q: %s", msg.Subject, msg.Data) 4912 } 4913 } 4914 t.Run("sub_b1_pub_a1", func(t *testing.T) { check(t, b1, a1) }) 4915 t.Run("sub_b1_pub_a2", func(t *testing.T) { check(t, b1, a2) }) 4916 t.Run("sub_b2_pub_a1", func(t *testing.T) { check(t, b2, a1) }) 4917 t.Run("sub_b2_pub_a2", func(t *testing.T) { check(t, b2, a2) }) 4918 } 4919 4920 func TestLeafNodeTLSHandshakeFirstVerifyNoInfoSent(t *testing.T) { 4921 confHub := createConfFile(t, []byte(` 4922 port : -1 4923 leafnodes : { 4924 port : -1 4925 tls { 4926 cert_file: "../test/configs/certs/server-cert.pem" 4927 key_file: "../test/configs/certs/server-key.pem" 4928 ca_file: "../test/configs/certs/ca.pem" 4929 timeout: 2 4930 handshake_first: true 4931 } 4932 } 4933 `)) 4934 s1, o1 := RunServerWithConfig(confHub) 4935 defer s1.Shutdown() 4936 4937 c, err := net.DialTimeout("tcp", fmt.Sprintf("127.0.0.1:%d", o1.LeafNode.Port), 2*time.Second) 4938 require_NoError(t, err) 4939 defer c.Close() 4940 4941 buf := make([]byte, 1024) 4942 // We will wait for up to 500ms to see if the server is sending (incorrectly) 4943 // the INFO. 4944 c.SetReadDeadline(time.Now().Add(500 * time.Millisecond)) 4945 n, err := c.Read(buf) 4946 c.SetReadDeadline(time.Time{}) 4947 // If we did not get an error, this is an issue... 4948 if err == nil { 4949 t.Fatalf("Should not have received anything, got n=%v buf=%s", n, buf[:n]) 4950 } 4951 // We expect a timeout error 4952 if ne, ok := err.(net.Error); !ok || !ne.Timeout() { 4953 t.Fatalf("Expected a timeout error, got %v", err) 4954 } 4955 } 4956 4957 func TestLeafNodeTLSHandshakeFirst(t *testing.T) { 4958 tmpl1 := ` 4959 port : -1 4960 leafnodes : { 4961 port : -1 4962 tls { 4963 cert_file: "../test/configs/certs/server-cert.pem" 4964 key_file: "../test/configs/certs/server-key.pem" 4965 ca_file: "../test/configs/certs/ca.pem" 4966 timeout: 2 4967 handshake_first: %s 4968 } 4969 } 4970 ` 4971 confHub := createConfFile(t, []byte(fmt.Sprintf(tmpl1, "true"))) 4972 s1, o1 := RunServerWithConfig(confHub) 4973 defer s1.Shutdown() 4974 4975 tmpl2 := ` 4976 port: -1 4977 leafnodes : { 4978 port : -1 4979 remotes : [ 4980 { 4981 urls : [tls://127.0.0.1:%d] 4982 tls { 4983 cert_file: "../test/configs/certs/client-cert.pem" 4984 key_file: "../test/configs/certs/client-key.pem" 4985 ca_file: "../test/configs/certs/ca.pem" 4986 timeout: 2 4987 first: %s 4988 } 4989 } 4990 ] 4991 } 4992 ` 4993 confSpoke := createConfFile(t, []byte(fmt.Sprintf(tmpl2, o1.LeafNode.Port, "true"))) 4994 s2, _ := RunServerWithConfig(confSpoke) 4995 defer s2.Shutdown() 4996 4997 checkLeafNodeConnected(t, s2) 4998 4999 s2.Shutdown() 5000 5001 // Now check that there will be a failure if the remote does not ask for 5002 // handshake first since the hub is configured that way. 
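// With handshake_first enabled the accept side stays silent until TLS is up,
// so a probing client has to flip the usual order: complete the TLS handshake
// first, then expect INFO. A rough sketch assuming the tls, bufio and time
// imports already used in this file; InsecureSkipVerify only keeps the sketch
// self-contained and the helper name is illustrative.
func probeHandshakeFirst(addr string) (string, error) {
	// tls.Dial performs the handshake before returning.
	c, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		return "", err
	}
	defer c.Close()
	// Only now should the server send its INFO line.
	c.SetReadDeadline(time.Now().Add(2 * time.Second))
	line, _, err := bufio.NewReader(c).ReadLine()
	if err != nil {
		return "", err
	}
	return string(line), nil
}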
5003 // Set a logger on s1 to capture errors 5004 l := &captureErrorLogger{errCh: make(chan string, 10)} 5005 s1.SetLogger(l, false, false) 5006 5007 confSpoke = createConfFile(t, []byte(fmt.Sprintf(tmpl2, o1.LeafNode.Port, "false"))) 5008 s2, _ = RunServerWithConfig(confSpoke) 5009 defer s2.Shutdown() 5010 5011 select { 5012 case err := <-l.errCh: 5013 if !strings.Contains(err, "handshake error") { 5014 t.Fatalf("Unexpected error: %v", err) 5015 } 5016 case <-time.After(2 * time.Second): 5017 t.Fatal("Did not get TLS handshake failure") 5018 } 5019 5020 // Check configuration reload for this remote 5021 reloadUpdateConfig(t, s2, confSpoke, fmt.Sprintf(tmpl2, o1.LeafNode.Port, "true")) 5022 checkLeafNodeConnected(t, s2) 5023 s2.Shutdown() 5024 5025 // Drain the logger error channel 5026 for done := false; !done; { 5027 select { 5028 case <-l.errCh: 5029 default: 5030 done = true 5031 } 5032 } 5033 5034 // Now change the config on the hub 5035 reloadUpdateConfig(t, s1, confHub, fmt.Sprintf(tmpl1, "false")) 5036 // Restart s2 5037 s2, _ = RunServerWithConfig(confSpoke) 5038 defer s2.Shutdown() 5039 5040 select { 5041 case err := <-l.errCh: 5042 if !strings.Contains(err, "handshake error") { 5043 t.Fatalf("Unexpected error: %v", err) 5044 } 5045 case <-time.After(2 * time.Second): 5046 t.Fatal("Did not get TLS handshake failure") 5047 } 5048 5049 // Reload again with "true" 5050 reloadUpdateConfig(t, s1, confHub, fmt.Sprintf(tmpl1, "true")) 5051 checkLeafNodeConnected(t, s2) 5052 } 5053 5054 func TestLeafNodeTLSHandshakeEvenForRemoteWithNoTLSBlock(t *testing.T) { 5055 confHub := createConfFile(t, []byte(` 5056 port : -1 5057 leafnodes : { 5058 port : -1 5059 tls { 5060 cert_file: "../test/configs/certs/server-cert.pem" 5061 key_file: "../test/configs/certs/server-key.pem" 5062 ca_file: "../test/configs/certs/ca.pem" 5063 timeout: 2 5064 } 5065 } 5066 `)) 5067 s1, o1 := RunServerWithConfig(confHub) 5068 defer s1.Shutdown() 5069 5070 tmpl2 := ` 5071 port: -1 5072 leafnodes : { 5073 port : -1 5074 remotes : [ 5075 { 5076 urls : [tls://127.0.0.1:%d] 5077 } 5078 ] 5079 } 5080 ` 5081 confSpoke := createConfFile(t, []byte(fmt.Sprintf(tmpl2, o1.LeafNode.Port))) 5082 s2, _ := RunServerWithConfig(confSpoke) 5083 defer s2.Shutdown() 5084 5085 l := &captureDebugLogger{dbgCh: make(chan string, 100)} 5086 s2.SetLogger(l, true, false) 5087 5088 tm := time.NewTimer(2 * time.Second) 5089 defer tm.Stop() 5090 for { 5091 select { 5092 case l := <-l.dbgCh: 5093 if strings.Contains(l, "Starting TLS") { 5094 // OK! 
5095 return 5096 } 5097 case <-tm.C: 5098 t.Fatalf("Did not perform a TLS handshake") 5099 } 5100 } 5101 } 5102 5103 func TestLeafNodeCompressionOptions(t *testing.T) { 5104 org := testDefaultLeafNodeCompression 5105 testDefaultLeafNodeCompression = _EMPTY_ 5106 defer func() { testDefaultLeafNodeCompression = org }() 5107 5108 tmpl := ` 5109 port: -1 5110 leafnodes { 5111 port: -1 5112 compression: %s 5113 } 5114 ` 5115 for _, test := range []struct { 5116 name string 5117 mode string 5118 rttVals []int 5119 expected string 5120 rtts []time.Duration 5121 }{ 5122 {"boolean enabled", "true", nil, CompressionS2Auto, defaultCompressionS2AutoRTTThresholds}, 5123 {"string enabled", "enabled", nil, CompressionS2Auto, defaultCompressionS2AutoRTTThresholds}, 5124 {"string EnaBled", "EnaBled", nil, CompressionS2Auto, defaultCompressionS2AutoRTTThresholds}, 5125 {"string on", "on", nil, CompressionS2Auto, defaultCompressionS2AutoRTTThresholds}, 5126 {"string ON", "ON", nil, CompressionS2Auto, defaultCompressionS2AutoRTTThresholds}, 5127 {"string fast", "fast", nil, CompressionS2Fast, nil}, 5128 {"string Fast", "Fast", nil, CompressionS2Fast, nil}, 5129 {"string s2_fast", "s2_fast", nil, CompressionS2Fast, nil}, 5130 {"string s2_Fast", "s2_Fast", nil, CompressionS2Fast, nil}, 5131 {"boolean disabled", "false", nil, CompressionOff, nil}, 5132 {"string disabled", "disabled", nil, CompressionOff, nil}, 5133 {"string DisableD", "DisableD", nil, CompressionOff, nil}, 5134 {"string off", "off", nil, CompressionOff, nil}, 5135 {"string OFF", "OFF", nil, CompressionOff, nil}, 5136 {"better", "better", nil, CompressionS2Better, nil}, 5137 {"Better", "Better", nil, CompressionS2Better, nil}, 5138 {"s2_better", "s2_better", nil, CompressionS2Better, nil}, 5139 {"S2_BETTER", "S2_BETTER", nil, CompressionS2Better, nil}, 5140 {"best", "best", nil, CompressionS2Best, nil}, 5141 {"BEST", "BEST", nil, CompressionS2Best, nil}, 5142 {"s2_best", "s2_best", nil, CompressionS2Best, nil}, 5143 {"S2_BEST", "S2_BEST", nil, CompressionS2Best, nil}, 5144 {"auto no rtts", "auto", nil, CompressionS2Auto, defaultCompressionS2AutoRTTThresholds}, 5145 {"s2_auto no rtts", "s2_auto", nil, CompressionS2Auto, defaultCompressionS2AutoRTTThresholds}, 5146 {"auto", "{mode: auto, rtt_thresholds: [%s]}", []int{1}, CompressionS2Auto, []time.Duration{time.Millisecond}}, 5147 {"Auto", "{Mode: Auto, thresholds: [%s]}", []int{1, 2}, CompressionS2Auto, []time.Duration{time.Millisecond, 2 * time.Millisecond}}, 5148 {"s2_auto", "{mode: s2_auto, thresholds: [%s]}", []int{1, 2, 3}, CompressionS2Auto, []time.Duration{time.Millisecond, 2 * time.Millisecond, 3 * time.Millisecond}}, 5149 {"s2_AUTO", "{mode: s2_AUTO, thresholds: [%s]}", []int{1, 2, 3, 4}, CompressionS2Auto, []time.Duration{time.Millisecond, 2 * time.Millisecond, 3 * time.Millisecond, 4 * time.Millisecond}}, 5150 {"s2_auto:-10,5,10", "{mode: s2_auto, thresholds: [%s]}", []int{-10, 5, 10}, CompressionS2Auto, []time.Duration{0, 5 * time.Millisecond, 10 * time.Millisecond}}, 5151 {"s2_auto:5,10,15", "{mode: s2_auto, thresholds: [%s]}", []int{5, 10, 15}, CompressionS2Auto, []time.Duration{5 * time.Millisecond, 10 * time.Millisecond, 15 * time.Millisecond}}, 5152 {"s2_auto:0,5,10", "{mode: s2_auto, thresholds: [%s]}", []int{0, 5, 10}, CompressionS2Auto, []time.Duration{0, 5 * time.Millisecond, 10 * time.Millisecond}}, 5153 {"s2_auto:5,10,0,20", "{mode: s2_auto, thresholds: [%s]}", []int{5, 10, 0, 20}, CompressionS2Auto, []time.Duration{5 * time.Millisecond, 10 * time.Millisecond, 0, 20 * 
time.Millisecond}}, 5154 {"s2_auto:0,10,0,20", "{mode: s2_auto, thresholds: [%s]}", []int{0, 10, 0, 20}, CompressionS2Auto, []time.Duration{0, 10 * time.Millisecond, 0, 20 * time.Millisecond}}, 5155 {"s2_auto:0,0,0,20", "{mode: s2_auto, thresholds: [%s]}", []int{0, 0, 0, 20}, CompressionS2Auto, []time.Duration{0, 0, 0, 20 * time.Millisecond}}, 5156 {"s2_auto:0,10,0,0", "{mode: s2_auto, rtt_thresholds: [%s]}", []int{0, 10, 0, 0}, CompressionS2Auto, []time.Duration{0, 10 * time.Millisecond}}, 5157 } { 5158 t.Run(test.name, func(t *testing.T) { 5159 var val string 5160 if len(test.rttVals) > 0 { 5161 var rtts string 5162 for i, v := range test.rttVals { 5163 if i > 0 { 5164 rtts += ", " 5165 } 5166 rtts += fmt.Sprintf("%dms", v) 5167 } 5168 val = fmt.Sprintf(test.mode, rtts) 5169 } else { 5170 val = test.mode 5171 } 5172 conf := createConfFile(t, []byte(fmt.Sprintf(tmpl, val))) 5173 s, o := RunServerWithConfig(conf) 5174 defer s.Shutdown() 5175 5176 if cm := o.LeafNode.Compression.Mode; cm != test.expected { 5177 t.Fatalf("Expected compression value to be %q, got %q", test.expected, cm) 5178 } 5179 if !reflect.DeepEqual(test.rtts, o.LeafNode.Compression.RTTThresholds) { 5180 t.Fatalf("Expected RTT tresholds to be %+v, got %+v", test.rtts, o.LeafNode.Compression.RTTThresholds) 5181 } 5182 s.Shutdown() 5183 5184 o.LeafNode.Port = -1 5185 o.LeafNode.Compression.Mode = test.mode 5186 if len(test.rttVals) > 0 { 5187 o.LeafNode.Compression.Mode = CompressionS2Auto 5188 o.LeafNode.Compression.RTTThresholds = o.LeafNode.Compression.RTTThresholds[:0] 5189 for _, v := range test.rttVals { 5190 o.LeafNode.Compression.RTTThresholds = append(o.LeafNode.Compression.RTTThresholds, time.Duration(v)*time.Millisecond) 5191 } 5192 } 5193 s = RunServer(o) 5194 defer s.Shutdown() 5195 if cm := o.LeafNode.Compression.Mode; cm != test.expected { 5196 t.Fatalf("Expected compression value to be %q, got %q", test.expected, cm) 5197 } 5198 if !reflect.DeepEqual(test.rtts, o.LeafNode.Compression.RTTThresholds) { 5199 t.Fatalf("Expected RTT tresholds to be %+v, got %+v", test.rtts, o.LeafNode.Compression.RTTThresholds) 5200 } 5201 }) 5202 } 5203 5204 // Same, but with remotes 5205 tmpl = ` 5206 port: -1 5207 leafnodes { 5208 port: -1 5209 remotes [ 5210 { 5211 url: "nats://127.0.0.1:1234" 5212 compression: %s 5213 } 5214 ] 5215 } 5216 ` 5217 for _, test := range []struct { 5218 name string 5219 mode string 5220 rttVals []int 5221 expected string 5222 rtts []time.Duration 5223 }{ 5224 {"boolean enabled", "true", nil, CompressionS2Auto, defaultCompressionS2AutoRTTThresholds}, 5225 {"string enabled", "enabled", nil, CompressionS2Auto, defaultCompressionS2AutoRTTThresholds}, 5226 {"string EnaBled", "EnaBled", nil, CompressionS2Auto, defaultCompressionS2AutoRTTThresholds}, 5227 {"string on", "on", nil, CompressionS2Auto, defaultCompressionS2AutoRTTThresholds}, 5228 {"string ON", "ON", nil, CompressionS2Auto, defaultCompressionS2AutoRTTThresholds}, 5229 {"string fast", "fast", nil, CompressionS2Fast, nil}, 5230 {"string Fast", "Fast", nil, CompressionS2Fast, nil}, 5231 {"string s2_fast", "s2_fast", nil, CompressionS2Fast, nil}, 5232 {"string s2_Fast", "s2_Fast", nil, CompressionS2Fast, nil}, 5233 {"boolean disabled", "false", nil, CompressionOff, nil}, 5234 {"string disabled", "disabled", nil, CompressionOff, nil}, 5235 {"string DisableD", "DisableD", nil, CompressionOff, nil}, 5236 {"string off", "off", nil, CompressionOff, nil}, 5237 {"string OFF", "OFF", nil, CompressionOff, nil}, 5238 {"better", "better", nil, 
CompressionS2Better, nil}, 5239 {"Better", "Better", nil, CompressionS2Better, nil}, 5240 {"s2_better", "s2_better", nil, CompressionS2Better, nil}, 5241 {"S2_BETTER", "S2_BETTER", nil, CompressionS2Better, nil}, 5242 {"best", "best", nil, CompressionS2Best, nil}, 5243 {"BEST", "BEST", nil, CompressionS2Best, nil}, 5244 {"s2_best", "s2_best", nil, CompressionS2Best, nil}, 5245 {"S2_BEST", "S2_BEST", nil, CompressionS2Best, nil}, 5246 {"auto no rtts", "auto", nil, CompressionS2Auto, defaultCompressionS2AutoRTTThresholds}, 5247 {"s2_auto no rtts", "s2_auto", nil, CompressionS2Auto, defaultCompressionS2AutoRTTThresholds}, 5248 {"auto", "{mode: auto, rtt_thresholds: [%s]}", []int{1}, CompressionS2Auto, []time.Duration{time.Millisecond}}, 5249 {"Auto", "{Mode: Auto, thresholds: [%s]}", []int{1, 2}, CompressionS2Auto, []time.Duration{time.Millisecond, 2 * time.Millisecond}}, 5250 {"s2_auto", "{mode: s2_auto, thresholds: [%s]}", []int{1, 2, 3}, CompressionS2Auto, []time.Duration{time.Millisecond, 2 * time.Millisecond, 3 * time.Millisecond}}, 5251 {"s2_AUTO", "{mode: s2_AUTO, thresholds: [%s]}", []int{1, 2, 3, 4}, CompressionS2Auto, []time.Duration{time.Millisecond, 2 * time.Millisecond, 3 * time.Millisecond, 4 * time.Millisecond}}, 5252 {"s2_auto:-10,5,10", "{mode: s2_auto, thresholds: [%s]}", []int{-10, 5, 10}, CompressionS2Auto, []time.Duration{0, 5 * time.Millisecond, 10 * time.Millisecond}}, 5253 {"s2_auto:5,10,15", "{mode: s2_auto, thresholds: [%s]}", []int{5, 10, 15}, CompressionS2Auto, []time.Duration{5 * time.Millisecond, 10 * time.Millisecond, 15 * time.Millisecond}}, 5254 {"s2_auto:0,5,10", "{mode: s2_auto, thresholds: [%s]}", []int{0, 5, 10}, CompressionS2Auto, []time.Duration{0, 5 * time.Millisecond, 10 * time.Millisecond}}, 5255 {"s2_auto:5,10,0,20", "{mode: s2_auto, thresholds: [%s]}", []int{5, 10, 0, 20}, CompressionS2Auto, []time.Duration{5 * time.Millisecond, 10 * time.Millisecond, 0, 20 * time.Millisecond}}, 5256 {"s2_auto:0,10,0,20", "{mode: s2_auto, thresholds: [%s]}", []int{0, 10, 0, 20}, CompressionS2Auto, []time.Duration{0, 10 * time.Millisecond, 0, 20 * time.Millisecond}}, 5257 {"s2_auto:0,0,0,20", "{mode: s2_auto, thresholds: [%s]}", []int{0, 0, 0, 20}, CompressionS2Auto, []time.Duration{0, 0, 0, 20 * time.Millisecond}}, 5258 {"s2_auto:0,10,0,0", "{mode: s2_auto, rtt_thresholds: [%s]}", []int{0, 10, 0, 0}, CompressionS2Auto, []time.Duration{0, 10 * time.Millisecond}}, 5259 } { 5260 t.Run("remote leaf "+test.name, func(t *testing.T) { 5261 var val string 5262 if len(test.rttVals) > 0 { 5263 var rtts string 5264 for i, v := range test.rttVals { 5265 if i > 0 { 5266 rtts += ", " 5267 } 5268 rtts += fmt.Sprintf("%dms", v) 5269 } 5270 val = fmt.Sprintf(test.mode, rtts) 5271 } else { 5272 val = test.mode 5273 } 5274 conf := createConfFile(t, []byte(fmt.Sprintf(tmpl, val))) 5275 s, o := RunServerWithConfig(conf) 5276 defer s.Shutdown() 5277 5278 r := o.LeafNode.Remotes[0] 5279 5280 if cm := r.Compression.Mode; cm != test.expected { 5281 t.Fatalf("Expected compression value to be %q, got %q", test.expected, cm) 5282 } 5283 if !reflect.DeepEqual(test.rtts, r.Compression.RTTThresholds) { 5284 t.Fatalf("Expected RTT tresholds to be %+v, got %+v", test.rtts, r.Compression.RTTThresholds) 5285 } 5286 s.Shutdown() 5287 5288 o.LeafNode.Port = -1 5289 o.LeafNode.Remotes[0].Compression.Mode = test.mode 5290 if len(test.rttVals) > 0 { 5291 o.LeafNode.Remotes[0].Compression.Mode = CompressionS2Auto 5292 o.LeafNode.Remotes[0].Compression.RTTThresholds = 
o.LeafNode.Remotes[0].Compression.RTTThresholds[:0] 5293 for _, v := range test.rttVals { 5294 o.LeafNode.Remotes[0].Compression.RTTThresholds = append(o.LeafNode.Remotes[0].Compression.RTTThresholds, time.Duration(v)*time.Millisecond) 5295 } 5296 } 5297 s = RunServer(o) 5298 defer s.Shutdown() 5299 if cm := o.LeafNode.Remotes[0].Compression.Mode; cm != test.expected { 5300 t.Fatalf("Expected compression value to be %q, got %q", test.expected, cm) 5301 } 5302 if !reflect.DeepEqual(test.rtts, o.LeafNode.Remotes[0].Compression.RTTThresholds) { 5303 t.Fatalf("Expected RTT tresholds to be %+v, got %+v", test.rtts, o.LeafNode.Remotes[0].Compression.RTTThresholds) 5304 } 5305 }) 5306 } 5307 5308 // Test that with no compression specified, we default to "s2_auto" 5309 conf := createConfFile(t, []byte(` 5310 port: -1 5311 leafnodes { 5312 port: -1 5313 } 5314 `)) 5315 s, o := RunServerWithConfig(conf) 5316 defer s.Shutdown() 5317 if o.LeafNode.Compression.Mode != CompressionS2Auto { 5318 t.Fatalf("Expected compression value to be %q, got %q", CompressionAccept, o.LeafNode.Compression.Mode) 5319 } 5320 if !reflect.DeepEqual(defaultCompressionS2AutoRTTThresholds, o.LeafNode.Compression.RTTThresholds) { 5321 t.Fatalf("Expected RTT tresholds to be %+v, got %+v", defaultCompressionS2AutoRTTThresholds, o.LeafNode.Compression.RTTThresholds) 5322 } 5323 // Same for remotes 5324 conf = createConfFile(t, []byte(` 5325 port: -1 5326 leafnodes { 5327 port: -1 5328 remotes [ { url: "nats://127.0.0.1:1234" } ] 5329 } 5330 `)) 5331 s, o = RunServerWithConfig(conf) 5332 defer s.Shutdown() 5333 if cm := o.LeafNode.Remotes[0].Compression.Mode; cm != CompressionS2Auto { 5334 t.Fatalf("Expected compression value to be %q, got %q", CompressionAccept, cm) 5335 } 5336 if !reflect.DeepEqual(defaultCompressionS2AutoRTTThresholds, o.LeafNode.Remotes[0].Compression.RTTThresholds) { 5337 t.Fatalf("Expected RTT tresholds to be %+v, got %+v", defaultCompressionS2AutoRTTThresholds, o.LeafNode.Remotes[0].Compression.RTTThresholds) 5338 } 5339 for _, test := range []struct { 5340 name string 5341 mode string 5342 rtts []time.Duration 5343 err string 5344 }{ 5345 {"unsupported mode", "gzip", nil, "unsupported"}, 5346 {"not ascending order", "s2_auto", []time.Duration{ 5347 5 * time.Millisecond, 5348 10 * time.Millisecond, 5349 2 * time.Millisecond, 5350 }, "ascending"}, 5351 {"too many thresholds", "s2_auto", []time.Duration{ 5352 5 * time.Millisecond, 5353 10 * time.Millisecond, 5354 20 * time.Millisecond, 5355 40 * time.Millisecond, 5356 60 * time.Millisecond, 5357 }, "more than 4"}, 5358 {"all 0", "s2_auto", []time.Duration{0, 0, 0, 0}, "at least one"}, 5359 {"single 0", "s2_auto", []time.Duration{0}, "at least one"}, 5360 } { 5361 t.Run(test.name, func(t *testing.T) { 5362 o := DefaultOptions() 5363 o.LeafNode.Port = -1 5364 o.LeafNode.Compression = CompressionOpts{test.mode, test.rtts} 5365 if _, err := NewServer(o); err == nil || !strings.Contains(err.Error(), test.err) { 5366 t.Fatalf("Unexpected error: %v", err) 5367 } 5368 // Same with remotes 5369 o.LeafNode.Compression = CompressionOpts{} 5370 o.LeafNode.Remotes = []*RemoteLeafOpts{{Compression: CompressionOpts{test.mode, test.rtts}}} 5371 if _, err := NewServer(o); err == nil || !strings.Contains(err.Error(), test.err) { 5372 t.Fatalf("Unexpected error: %v", err) 5373 } 5374 }) 5375 } 5376 } 5377 5378 func TestLeafNodeCompression(t *testing.T) { 5379 conf1 := createConfFile(t, []byte(` 5380 port: -1 5381 server_name: "Hub" 5382 accounts { 5383 A { users: [{user: a, 
password: pwd}] } 5384 B { users: [{user: b, password: pwd}] } 5385 C { users: [{user: c, password: pwd}] } 5386 } 5387 leafnodes { 5388 port: -1 5389 compression: s2_fast 5390 } 5391 `)) 5392 s1, o1 := RunServerWithConfig(conf1) 5393 defer s1.Shutdown() 5394 5395 port := o1.LeafNode.Port 5396 conf2 := createConfFile(t, []byte(fmt.Sprintf(` 5397 port: -1 5398 server_name: "Spoke" 5399 accounts { 5400 A { users: [{user: a, password: pwd}] } 5401 B { users: [{user: b, password: pwd}] } 5402 C { users: [{user: c, password: pwd}] } 5403 } 5404 leafnodes { 5405 remotes [ 5406 { url: "nats://a:pwd@127.0.0.1:%d", account: "A", compression: s2_better } 5407 { url: "nats://b:pwd@127.0.0.1:%d", account: "B", compression: s2_best } 5408 { url: "nats://c:pwd@127.0.0.1:%d", account: "C", compression: off } 5409 ] 5410 } 5411 `, port, port, port))) 5412 s2, _ := RunServerWithConfig(conf2) 5413 defer s2.Shutdown() 5414 5415 checkLeafNodeConnectedCount(t, s1, 3) 5416 checkLeafNodeConnectedCount(t, s2, 3) 5417 5418 s1.mu.RLock() 5419 for _, l := range s1.leafs { 5420 l.mu.Lock() 5421 l.nc = &testConnSentBytes{Conn: l.nc} 5422 l.mu.Unlock() 5423 } 5424 s1.mu.RUnlock() 5425 5426 var payloads [][]byte 5427 totalPayloadSize := 0 5428 count := 26 5429 for i := 0; i < count; i++ { 5430 n := rand.Intn(2048) + 1 5431 p := make([]byte, n) 5432 for j := 0; j < n; j++ { 5433 p[j] = byte(i) + 'A' 5434 } 5435 totalPayloadSize += len(p) 5436 payloads = append(payloads, p) 5437 } 5438 5439 check := func(acc, user, subj string) { 5440 t.Helper() 5441 nc2 := natsConnect(t, s2.ClientURL(), nats.UserInfo(user, "pwd")) 5442 defer nc2.Close() 5443 sub := natsSubSync(t, nc2, subj) 5444 natsFlush(t, nc2) 5445 checkSubInterest(t, s1, acc, subj, time.Second) 5446 5447 nc1 := natsConnect(t, s1.ClientURL(), nats.UserInfo(user, "pwd")) 5448 defer nc1.Close() 5449 5450 for i := 0; i < count; i++ { 5451 natsPub(t, nc1, subj, payloads[i]) 5452 } 5453 for i := 0; i < count; i++ { 5454 m := natsNexMsg(t, sub, time.Second) 5455 if !bytes.Equal(m.Data, payloads[i]) { 5456 t.Fatalf("Expected payload %q - got %q", payloads[i], m.Data) 5457 } 5458 } 5459 5460 // Also check that the leafnode stats shows that compression likely occurred 5461 var out int 5462 s1.mu.RLock() 5463 for _, l := range s1.leafs { 5464 l.mu.Lock() 5465 if l.acc.Name == acc && l.nc != nil { 5466 nc := l.nc.(*testConnSentBytes) 5467 nc.Lock() 5468 out = nc.sent 5469 nc.sent = 0 5470 nc.Unlock() 5471 } 5472 l.mu.Unlock() 5473 } 5474 s1.mu.RUnlock() 5475 // Except for account "C", where compression should be off, 5476 // "out" should at least be smaller than totalPayloadSize, use 20%. 5477 if acc == "C" { 5478 if int(out) < totalPayloadSize { 5479 t.Fatalf("Expected s1's sent bytes to be at least payload size (%v), got %v", totalPayloadSize, out) 5480 } 5481 } else { 5482 limit := totalPayloadSize * 80 / 100 5483 if int(out) > limit { 5484 t.Fatalf("Expected s1's sent bytes to be less than %v, got %v (total payload was %v)", limit, out, totalPayloadSize) 5485 } 5486 } 5487 } 5488 check("A", "a", "foo") 5489 check("B", "b", "bar") 5490 check("C", "c", "baz") 5491 5492 // Check compression settings. 
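// The byte accounting above goes through testConnSentBytes, which is defined
// elsewhere in this package; a wrapper in that spirit would look roughly like
// the sketch below. The name countingConn is illustrative, not the actual type.
type countingConn struct {
	net.Conn
	sync.Mutex
	sent int
}

// Write forwards to the wrapped connection and tallies the bytes that actually
// hit the wire, which is what lets the test compare sent bytes to payload size.
func (c *countingConn) Write(p []byte) (int, error) {
	n, err := c.Conn.Write(p)
	c.Lock()
	c.sent += n
	c.Unlock()
	return n, err
}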
S1 should always be s2_fast, except for account "C" 5493 // since "C" wanted compression "off" 5494 l, err := s1.Leafz(nil) 5495 require_NoError(t, err) 5496 for _, r := range l.Leafs { 5497 switch r.Account { 5498 case "C": 5499 if r.Compression != CompressionOff { 5500 t.Fatalf("Expected compression of remote for C account to be %q, got %q", CompressionOff, r.Compression) 5501 } 5502 default: 5503 if r.Compression != CompressionS2Fast { 5504 t.Fatalf("Expected compression of remote for %s account to be %q, got %q", r.Account, CompressionS2Fast, r.Compression) 5505 } 5506 } 5507 } 5508 5509 l, err = s2.Leafz(nil) 5510 require_NoError(t, err) 5511 for _, r := range l.Leafs { 5512 switch r.Account { 5513 case "A": 5514 if r.Compression != CompressionS2Better { 5515 t.Fatalf("Expected compression for A account to be %q, got %q", CompressionS2Better, r.Compression) 5516 } 5517 case "B": 5518 if r.Compression != CompressionS2Best { 5519 t.Fatalf("Expected compression for B account to be %q, got %q", CompressionS2Best, r.Compression) 5520 } 5521 case "C": 5522 if r.Compression != CompressionOff { 5523 t.Fatalf("Expected compression for C account to be %q, got %q", CompressionOff, r.Compression) 5524 } 5525 } 5526 } 5527 } 5528 5529 func BenchmarkLeafNodeCompression(b *testing.B) { 5530 conf1 := createConfFile(b, []byte(` 5531 port: -1 5532 server_name: "Hub" 5533 accounts { 5534 A { users: [{user: a, password: pwd}] } 5535 B { users: [{user: b, password: pwd}] } 5536 C { users: [{user: c, password: pwd}] } 5537 D { users: [{user: d, password: pwd}] } 5538 } 5539 leafnodes { 5540 port: -1 5541 } 5542 `)) 5543 s1, o1 := RunServerWithConfig(conf1) 5544 defer s1.Shutdown() 5545 5546 port := o1.LeafNode.Port 5547 conf2 := createConfFile(b, []byte(fmt.Sprintf(` 5548 port: -1 5549 server_name: "Spoke" 5550 accounts { 5551 A { users: [{user: a, password: pwd}] } 5552 B { users: [{user: b, password: pwd}] } 5553 C { users: [{user: c, password: pwd}] } 5554 D { users: [{user: d, password: pwd}] } 5555 } 5556 leafnodes { 5557 remotes [ 5558 { url: "nats://a:pwd@127.0.0.1:%d", account: "A", compression: s2_better } 5559 { url: "nats://b:pwd@127.0.0.1:%d", account: "B", compression: s2_best } 5560 { url: "nats://c:pwd@127.0.0.1:%d", account: "C", compression: s2_fast } 5561 { url: "nats://d:pwd@127.0.0.1:%d", account: "D", compression: off } 5562 ] 5563 } 5564 `, port, port, port, port))) 5565 s2, _ := RunServerWithConfig(conf2) 5566 defer s2.Shutdown() 5567 5568 checkLeafNodeConnectedCount(b, s1, 4) 5569 checkLeafNodeConnectedCount(b, s2, 4) 5570 5571 l, err := s2.Leafz(nil) 5572 require_NoError(b, err) 5573 for _, r := range l.Leafs { 5574 switch { 5575 case r.Account == "A" && r.Compression == CompressionS2Better: 5576 case r.Account == "B" && r.Compression == CompressionS2Best: 5577 case r.Account == "C" && r.Compression == CompressionS2Fast: 5578 case r.Account == "D" && r.Compression == CompressionOff: 5579 default: 5580 b.Fatalf("Account %q had incorrect compression mode %q on leaf connection", r.Account, r.Compression) 5581 } 5582 } 5583 5584 msg := make([]byte, 1024) 5585 for _, p := range []struct { 5586 algo string 5587 user string 5588 }{ 5589 {"Better", "a"}, 5590 {"Best", "b"}, 5591 {"Fast", "c"}, 5592 {"Off", "d"}, 5593 } { 5594 nc1 := natsConnect(b, s1.ClientURL(), nats.UserInfo(p.user, "pwd")) 5595 nc2 := natsConnect(b, s2.ClientURL(), nats.UserInfo(p.user, "pwd")) 5596 5597 sub, err := nc1.SubscribeSync("foo") 5598 require_NoError(b, err) 5599 5600 time.Sleep(time.Second) 5601 5602 
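// The fast/better/best modes compared in this benchmark map onto the klauspost
// s2 encoder levels imported by this file. A small sketch of the size trade-off
// those levels expose; the helper name s2Sizes is illustrative.
func s2Sizes(payload []byte) (fast, better, best int) {
	// Each level spends more CPU to shrink the wire size further.
	fast = len(s2.Encode(nil, payload))
	better = len(s2.EncodeBetter(nil, payload))
	best = len(s2.EncodeBest(nil, payload))
	return fast, better, best
}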
b.Run(p.algo, func(b *testing.B) { 5603 start := time.Now() 5604 5605 for i := 0; i < b.N; i++ { 5606 err = nc2.Publish("foo", msg) 5607 require_NoError(b, err) 5608 5609 _, err = sub.NextMsg(time.Second) 5610 require_NoError(b, err) 5611 } 5612 5613 b.ReportMetric(float64(len(msg)*b.N)/1024/1024, "MB") 5614 b.ReportMetric(float64(len(msg)*b.N)/1024/1024/float64(time.Since(start).Seconds()), "MB/sec") 5615 }) 5616 5617 nc1.Close() 5618 nc2.Close() 5619 } 5620 } 5621 5622 func TestLeafNodeCompressionMatrixModes(t *testing.T) { 5623 for _, test := range []struct { 5624 name string 5625 s1 string 5626 s2 string 5627 s1Expected string 5628 s2Expected string 5629 }{ 5630 {"off off", "off", "off", CompressionOff, CompressionOff}, 5631 {"off accept", "off", "accept", CompressionOff, CompressionOff}, 5632 {"off on", "off", "on", CompressionOff, CompressionOff}, 5633 {"off better", "off", "better", CompressionOff, CompressionOff}, 5634 {"off best", "off", "best", CompressionOff, CompressionOff}, 5635 5636 {"accept off", "accept", "off", CompressionOff, CompressionOff}, 5637 {"accept accept", "accept", "accept", CompressionOff, CompressionOff}, 5638 // Note: "on", means s2_auto, which will mean uncompressed since RTT is low. 5639 {"accept on", "accept", "on", CompressionS2Fast, CompressionS2Uncompressed}, 5640 {"accept better", "accept", "better", CompressionS2Better, CompressionS2Better}, 5641 {"accept best", "accept", "best", CompressionS2Best, CompressionS2Best}, 5642 5643 {"on off", "on", "off", CompressionOff, CompressionOff}, 5644 {"on accept", "on", "accept", CompressionS2Uncompressed, CompressionS2Fast}, 5645 {"on on", "on", "on", CompressionS2Uncompressed, CompressionS2Uncompressed}, 5646 {"on better", "on", "better", CompressionS2Uncompressed, CompressionS2Better}, 5647 {"on best", "on", "best", CompressionS2Uncompressed, CompressionS2Best}, 5648 5649 {"better off", "better", "off", CompressionOff, CompressionOff}, 5650 {"better accept", "better", "accept", CompressionS2Better, CompressionS2Better}, 5651 {"better on", "better", "on", CompressionS2Better, CompressionS2Uncompressed}, 5652 {"better better", "better", "better", CompressionS2Better, CompressionS2Better}, 5653 {"better best", "better", "best", CompressionS2Better, CompressionS2Best}, 5654 5655 {"best off", "best", "off", CompressionOff, CompressionOff}, 5656 {"best accept", "best", "accept", CompressionS2Best, CompressionS2Best}, 5657 {"best on", "best", "on", CompressionS2Best, CompressionS2Uncompressed}, 5658 {"best better", "best", "better", CompressionS2Best, CompressionS2Better}, 5659 {"best best", "best", "best", CompressionS2Best, CompressionS2Best}, 5660 } { 5661 t.Run(test.name, func(t *testing.T) { 5662 conf1 := createConfFile(t, []byte(fmt.Sprintf(` 5663 port: -1 5664 server_name: "A" 5665 leafnodes { 5666 port: -1 5667 compression: %s 5668 } 5669 `, test.s1))) 5670 s1, o1 := RunServerWithConfig(conf1) 5671 defer s1.Shutdown() 5672 5673 conf2 := createConfFile(t, []byte(fmt.Sprintf(` 5674 port: -1 5675 server_name: "B" 5676 leafnodes { 5677 remotes: [ 5678 {url: "nats://127.0.0.1:%d", compression: %s} 5679 ] 5680 } 5681 `, o1.LeafNode.Port, test.s2))) 5682 s2, _ := RunServerWithConfig(conf2) 5683 defer s2.Shutdown() 5684 5685 checkLeafNodeConnected(t, s2) 5686 5687 nc1 := natsConnect(t, s1.ClientURL()) 5688 defer nc1.Close() 5689 5690 nc2 := natsConnect(t, s2.ClientURL()) 5691 defer nc2.Close() 5692 5693 payload := make([]byte, 128) 5694 check := func(ncp, ncs *nats.Conn, subj string, s *Server) { 5695 t.Helper() 5696 
sub := natsSubSync(t, ncs, subj) 5697 checkSubInterest(t, s, globalAccountName, subj, time.Second) 5698 natsPub(t, ncp, subj, payload) 5699 natsNexMsg(t, sub, time.Second) 5700 5701 for _, srv := range []*Server{s1, s2} { 5702 lz, err := srv.Leafz(nil) 5703 require_NoError(t, err) 5704 var expected string 5705 if srv == s1 { 5706 expected = test.s1Expected 5707 } else { 5708 expected = test.s2Expected 5709 } 5710 if cm := lz.Leafs[0].Compression; cm != expected { 5711 t.Fatalf("Server %s - expected compression %q, got %q", srv, expected, cm) 5712 } 5713 } 5714 } 5715 check(nc1, nc2, "foo", s1) 5716 check(nc2, nc1, "bar", s2) 5717 }) 5718 } 5719 } 5720 5721 func TestLeafNodeCompressionWithOlderServer(t *testing.T) { 5722 tmpl1 := ` 5723 port: -1 5724 server_name: "A" 5725 leafnodes { 5726 port: -1 5727 compression: "%s" 5728 } 5729 ` 5730 conf1 := createConfFile(t, []byte(fmt.Sprintf(tmpl1, CompressionS2Fast))) 5731 s1, o1 := RunServerWithConfig(conf1) 5732 defer s1.Shutdown() 5733 5734 tmpl2 := ` 5735 port: -1 5736 server_name: "B" 5737 leafnodes { 5738 remotes [ 5739 {url: "nats://127.0.0.1:%d", compression: "%s"} 5740 ] 5741 } 5742 ` 5743 conf2 := createConfFile(t, []byte(fmt.Sprintf(tmpl2, o1.LeafNode.Port, CompressionNotSupported))) 5744 s2, _ := RunServerWithConfig(conf2) 5745 defer s2.Shutdown() 5746 5747 checkLeafNodeConnected(t, s2) 5748 5749 getLeafCompMode := func(s *Server) string { 5750 var cm string 5751 s.mu.RLock() 5752 defer s.mu.RUnlock() 5753 for _, l := range s1.leafs { 5754 l.mu.Lock() 5755 cm = l.leaf.compression 5756 l.mu.Unlock() 5757 return cm 5758 } 5759 return _EMPTY_ 5760 } 5761 for _, s := range []*Server{s1, s2} { 5762 if cm := getLeafCompMode(s); cm != CompressionNotSupported { 5763 t.Fatalf("Expected compression not supported, got %q", cm) 5764 } 5765 } 5766 5767 s2.Shutdown() 5768 s1.Shutdown() 5769 5770 conf1 = createConfFile(t, []byte(fmt.Sprintf(tmpl1, CompressionNotSupported))) 5771 s1, o1 = RunServerWithConfig(conf1) 5772 defer s1.Shutdown() 5773 5774 conf2 = createConfFile(t, []byte(fmt.Sprintf(tmpl2, o1.LeafNode.Port, CompressionS2Fast))) 5775 s2, _ = RunServerWithConfig(conf2) 5776 defer s2.Shutdown() 5777 5778 checkLeafNodeConnected(t, s2) 5779 for _, s := range []*Server{s1, s2} { 5780 if cm := getLeafCompMode(s); cm != CompressionNotSupported { 5781 t.Fatalf("Expected compression not supported, got %q", cm) 5782 } 5783 } 5784 } 5785 5786 func TestLeafNodeCompressionAuto(t *testing.T) { 5787 for _, test := range []struct { 5788 name string 5789 s1Ping string 5790 s1Compression string 5791 s2Ping string 5792 s2Compression string 5793 checkS1 bool 5794 }{ 5795 {"remote side", "10s", CompressionS2Fast, "100ms", "{mode: s2_auto, rtt_thresholds: [10ms, 20ms, 30ms]}", false}, 5796 {"accept side", "100ms", "{mode: s2_auto, rtt_thresholds: [10ms, 20ms, 30ms]}", "10s", CompressionS2Fast, true}, 5797 } { 5798 t.Run(test.name, func(t *testing.T) { 5799 conf1 := createConfFile(t, []byte(fmt.Sprintf(` 5800 port: -1 5801 server_name: "A" 5802 ping_interval: "%s" 5803 leafnodes { 5804 port: -1 5805 compression: %s 5806 } 5807 `, test.s1Ping, test.s1Compression))) 5808 s1, o1 := RunServerWithConfig(conf1) 5809 defer s1.Shutdown() 5810 5811 // Start with 0ms RTT 5812 np := createNetProxy(0, 1024*1024*1024, 1024*1024*1024, fmt.Sprintf("nats://127.0.0.1:%d", o1.LeafNode.Port), true) 5813 5814 conf2 := createConfFile(t, []byte(fmt.Sprintf(` 5815 port: -1 5816 server_name: "B" 5817 ping_interval: "%s" 5818 leafnodes { 5819 remotes [ 5820 {url: %s, compression %s} 5821 
] 5822 } 5823 `, test.s2Ping, np.routeURL(), test.s2Compression))) 5824 s2, _ := RunServerWithConfig(conf2) 5825 defer s2.Shutdown() 5826 defer np.stop() 5827 5828 checkLeafNodeConnected(t, s2) 5829 5830 checkComp := func(expected string) { 5831 t.Helper() 5832 var s *Server 5833 if test.checkS1 { 5834 s = s1 5835 } else { 5836 s = s2 5837 } 5838 checkFor(t, 2*time.Second, 15*time.Millisecond, func() error { 5839 s.mu.RLock() 5840 defer s.mu.RUnlock() 5841 for _, l := range s.leafs { 5842 l.mu.Lock() 5843 cm := l.leaf.compression 5844 l.mu.Unlock() 5845 if cm != expected { 5846 return fmt.Errorf("Leaf %v compression mode expected to be %q, got %q", l, expected, cm) 5847 } 5848 } 5849 return nil 5850 }) 5851 } 5852 checkComp(CompressionS2Uncompressed) 5853 5854 // Change the proxy RTT and we should get compression "fast" 5855 np.updateRTT(15 * time.Millisecond) 5856 checkComp(CompressionS2Fast) 5857 5858 // Now 25ms, and get "better" 5859 np.updateRTT(25 * time.Millisecond) 5860 checkComp(CompressionS2Better) 5861 5862 // Above 35 and we should get "best" 5863 np.updateRTT(35 * time.Millisecond) 5864 checkComp(CompressionS2Best) 5865 5866 // Down to 1ms and again should get "uncompressed" 5867 np.updateRTT(1 * time.Millisecond) 5868 checkComp(CompressionS2Uncompressed) 5869 }) 5870 } 5871 5872 // Make sure that if compression is off on one side, the update of RTT does 5873 // not trigger a compression change. 5874 conf1 := createConfFile(t, []byte(` 5875 port: -1 5876 server_name: "A" 5877 leafnodes { 5878 port: -1 5879 compression: off 5880 } 5881 `)) 5882 s1, o1 := RunServerWithConfig(conf1) 5883 defer s1.Shutdown() 5884 5885 // Start with 0ms RTT 5886 np := createNetProxy(0, 1024*1024*1024, 1024*1024*1024, fmt.Sprintf("nats://127.0.0.1:%d", o1.LeafNode.Port), true) 5887 5888 conf2 := createConfFile(t, []byte(fmt.Sprintf(` 5889 port: -1 5890 server_name: "B" 5891 ping_interval: "50ms" 5892 leafnodes { 5893 remotes [ 5894 {url: %s, compression s2_auto} 5895 ] 5896 } 5897 `, np.routeURL()))) 5898 s2, _ := RunServerWithConfig(conf2) 5899 defer s2.Shutdown() 5900 defer np.stop() 5901 5902 checkLeafNodeConnected(t, s2) 5903 5904 // Even with a bug of updating compression level while it should have been 5905 // off, the check done below would almost always pass because after 5906 // reconnecting, there could be a chance to get at first compression set 5907 // to "off". So we will double check that the leaf node CID did not change 5908 // at the end of the test. 
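// For reference, the s2_auto ladder exercised in the subtests above is assumed
// to map the measured RTT onto the configured rtt_thresholds roughly like the
// hypothetical helper below (an illustration of the expected behavior, not the
// server's actual implementation):
//
//      func selectCompressionForRTT(rtt time.Duration, thresholds []time.Duration) string {
//              // thresholds would be [10ms, 20ms, 30ms] as in the configs above.
//              modes := []string{CompressionS2Uncompressed, CompressionS2Fast, CompressionS2Better, CompressionS2Best}
//              mode := modes[0]
//              for i, th := range thresholds {
//                      if rtt >= th {
//                              mode = modes[i+1]
//                      }
//              }
//              return mode
//      }
//
// With those thresholds: 0ms or 1ms -> uncompressed, 15ms -> fast,
// 25ms -> better, 35ms -> best, which matches the checkComp expectations above.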
5909 getCID := func() uint64 { 5910 s2.mu.RLock() 5911 defer s2.mu.RUnlock() 5912 for _, l := range s2.leafs { 5913 l.mu.Lock() 5914 cid := l.cid 5915 l.mu.Unlock() 5916 return cid 5917 } 5918 return 0 5919 } 5920 oldCID := getCID() 5921 5922 checkCompOff := func() { 5923 t.Helper() 5924 checkFor(t, 2*time.Second, 15*time.Millisecond, func() error { 5925 s2.mu.RLock() 5926 defer s2.mu.RUnlock() 5927 if len(s2.leafs) != 1 { 5928 return fmt.Errorf("Leaf not currently connected") 5929 } 5930 for _, l := range s2.leafs { 5931 l.mu.Lock() 5932 cm := l.leaf.compression 5933 l.mu.Unlock() 5934 if cm != CompressionOff { 5935 return fmt.Errorf("Leaf %v compression mode expected to be %q, got %q", l, CompressionOff, cm) 5936 } 5937 } 5938 return nil 5939 }) 5940 } 5941 checkCompOff() 5942 5943 // Now change RTT and again, make sure that it is still off 5944 np.updateRTT(20 * time.Millisecond) 5945 time.Sleep(100 * time.Millisecond) 5946 checkCompOff() 5947 if cid := getCID(); cid != oldCID { 5948 t.Fatalf("Leafnode has reconnected, cid was %v, now %v", oldCID, cid) 5949 } 5950 } 5951 5952 func TestLeafNodeCompressionWithWSCompression(t *testing.T) { 5953 conf1 := createConfFile(t, []byte(` 5954 port: -1 5955 server_name: "A" 5956 websocket { 5957 port: -1 5958 no_tls: true 5959 compression: true 5960 } 5961 leafnodes { 5962 port: -1 5963 compression: s2_fast 5964 } 5965 `)) 5966 s1, o1 := RunServerWithConfig(conf1) 5967 defer s1.Shutdown() 5968 5969 conf2 := createConfFile(t, []byte(fmt.Sprintf(` 5970 port: -1 5971 server_name: "B" 5972 leafnodes { 5973 remotes [ 5974 { 5975 url: "ws://127.0.0.1:%d" 5976 ws_compression: true 5977 compression: s2_fast 5978 } 5979 ] 5980 } 5981 `, o1.Websocket.Port))) 5982 s2, _ := RunServerWithConfig(conf2) 5983 defer s2.Shutdown() 5984 5985 checkLeafNodeConnected(t, s2) 5986 5987 nc1 := natsConnect(t, s1.ClientURL()) 5988 defer nc1.Close() 5989 5990 sub := natsSubSync(t, nc1, "foo") 5991 checkSubInterest(t, s2, globalAccountName, "foo", time.Second) 5992 5993 nc2 := natsConnect(t, s2.ClientURL()) 5994 defer nc2.Close() 5995 5996 payload := make([]byte, 1024) 5997 for i := 0; i < len(payload); i++ { 5998 payload[i] = 'A' 5999 } 6000 natsPub(t, nc2, "foo", payload) 6001 msg := natsNexMsg(t, sub, time.Second) 6002 require_True(t, len(msg.Data) == 1024) 6003 for i := 0; i < len(msg.Data); i++ { 6004 if msg.Data[i] != 'A' { 6005 t.Fatalf("Invalid msg: %s", msg.Data) 6006 } 6007 } 6008 } 6009 6010 func TestLeafNodeCompressionWithWSGetNeedsData(t *testing.T) { 6011 conf1 := createConfFile(t, []byte(` 6012 port: -1 6013 server_name: "A" 6014 websocket { 6015 port: -1 6016 no_tls: true 6017 } 6018 leafnodes { 6019 port: -1 6020 compression: s2_fast 6021 } 6022 `)) 6023 srv1, o1 := RunServerWithConfig(conf1) 6024 defer srv1.Shutdown() 6025 6026 conf2 := createConfFile(t, []byte(fmt.Sprintf(` 6027 port: -1 6028 server_name: "B" 6029 leafnodes { 6030 remotes [ 6031 { 6032 url: "ws://127.0.0.1:%d" 6033 ws_no_masking: true 6034 compression: s2_fast 6035 } 6036 ] 6037 } 6038 `, o1.Websocket.Port))) 6039 srv2, _ := RunServerWithConfig(conf2) 6040 defer srv2.Shutdown() 6041 6042 checkLeafNodeConnected(t, srv2) 6043 6044 nc1 := natsConnect(t, srv1.ClientURL()) 6045 defer nc1.Close() 6046 6047 sub := natsSubSync(t, nc1, "foo") 6048 checkSubInterest(t, srv2, globalAccountName, "foo", time.Second) 6049 6050 // We want to have the payload more than 126 bytes so that the websocket 6051 // code need to read 2 bytes for the length. See below. 
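// For context on the comment above: per RFC 6455 a payload of up to 125 bytes
// fits in the base length field, the value 126 signals that the next 2 bytes
// carry the real length, and 127 signals an 8-byte extended length. A minimal
// sketch (wsExtraLenBytes is a hypothetical helper, not part of the server):
//
//      func wsExtraLenBytes(payloadLen int) int {
//              switch {
//              case payloadLen <= 125:
//                      return 0 // length fits directly in the base header byte
//              case payloadLen <= 0xFFFF:
//                      return 2 // marker 126 + 2 extra length bytes (the case used below)
//              default:
//                      return 8 // marker 127 + 8 extra length bytes
//              }
//      }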
6052 payload := "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ" 6053 sentBytes := []byte("LMSG foo 156\r\n" + payload + "\r\n") 6054 h, _ := wsCreateFrameHeader(false, false, wsBinaryMessage, len(sentBytes)) 6055 combined := &bytes.Buffer{} 6056 combined.Write(h) 6057 combined.Write(sentBytes) 6058 toSend := combined.Bytes() 6059 6060 // We will make a compressed block that cuts the websocket header that 6061 // makes the reader want to read bytes directly from the connection. 6062 // We want to make sure that we are not going to get compressed data 6063 // without going through the (de)compress library. So for that, compress 6064 // the first 3 bytes. 6065 b := &bytes.Buffer{} 6066 w := s2.NewWriter(b) 6067 w.Write(toSend[:3]) 6068 w.Close() 6069 6070 var nc net.Conn 6071 srv2.mu.RLock() 6072 for _, l := range srv2.leafs { 6073 l.mu.Lock() 6074 nc = l.nc 6075 l.mu.Unlock() 6076 } 6077 srv2.mu.RUnlock() 6078 6079 nc.Write(b.Bytes()) 6080 6081 // Pause to make sure other side just gets a partial of the whole WS frame. 6082 time.Sleep(100 * time.Millisecond) 6083 6084 b.Reset() 6085 w.Reset(b) 6086 w.Write(toSend[3:]) 6087 w.Close() 6088 6089 nc.Write(b.Bytes()) 6090 6091 msg := natsNexMsg(t, sub, time.Second) 6092 require_True(t, len(msg.Data) == 156) 6093 require_Equal(t, string(msg.Data), payload) 6094 } 6095 6096 func TestLeafNodeCompressionAuthTimeout(t *testing.T) { 6097 hconf := createConfFile(t, []byte(` 6098 port: -1 6099 server_name: "hub" 6100 leafnodes { 6101 port: -1 6102 authorization { 6103 timeout: 0.75 6104 } 6105 } 6106 `)) 6107 sh, oh := RunServerWithConfig(hconf) 6108 defer sh.Shutdown() 6109 6110 sconfTmpl := ` 6111 port: -1 6112 server_name: "%s" 6113 cluster { 6114 port: -1 6115 name: "spoke" 6116 %s 6117 } 6118 leafnodes { 6119 port: -1 6120 remotes [ 6121 { url: "nats://127.0.0.1:%d" } 6122 ] 6123 } 6124 ` 6125 s1conf := createConfFile(t, []byte(fmt.Sprintf(sconfTmpl, "SP1", _EMPTY_, oh.LeafNode.Port))) 6126 s1, o1 := RunServerWithConfig(s1conf) 6127 defer s1.Shutdown() 6128 6129 s2conf := createConfFile(t, []byte(fmt.Sprintf(sconfTmpl, "SP2", fmt.Sprintf("routes: [\"nats://127.0.0.1:%d\"]", o1.Cluster.Port), oh.LeafNode.Port))) 6130 s2, _ := RunServerWithConfig(s2conf) 6131 defer s2.Shutdown() 6132 6133 checkClusterFormed(t, s1, s2) 6134 6135 checkLeafNodeConnected(t, s1) 6136 checkLeafNodeConnected(t, s2) 6137 6138 getCID := func(s *Server) uint64 { 6139 s.mu.RLock() 6140 defer s.mu.RUnlock() 6141 var cid uint64 6142 for _, l := range s.leafs { 6143 l.mu.Lock() 6144 cid = l.cid 6145 l.mu.Unlock() 6146 } 6147 return cid 6148 } 6149 leaf1 := getCID(s1) 6150 leaf2 := getCID(s2) 6151 6152 // Wait for more than auth timeout 6153 time.Sleep(time.Second) 6154 6155 checkLeafNodeConnected(t, s1) 6156 checkLeafNodeConnected(t, s2) 6157 if l1 := getCID(s1); l1 != leaf1 { 6158 t.Fatalf("Leaf connection first connection had CID %v, now %v", leaf1, l1) 6159 } 6160 if l2 := getCID(s2); l2 != leaf2 { 6161 t.Fatalf("Leaf connection first connection had CID %v, now %v", leaf2, l2) 6162 } 6163 } 6164 6165 func TestLeafNodeWithWeightedDQRequestsToSuperClusterWithSeparateAccounts(t *testing.T) { 6166 sc := createJetStreamSuperClusterWithTemplate(t, jsClusterAccountsTempl, 3, 2) 6167 defer sc.shutdown() 6168 6169 // Now create a leafnode cluster that has 2 LNs, one to each cluster but on separate accounts, ONE and TWO. 
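// For illustration, once genLeafTmpl below substitutes the URLs gathered from
// the two clusters, the {{leaf}} placeholder is expected to expand to roughly
// the following (hosts and ports are examples only; each cluster has 3 servers):
//
//      leaf {
//              listen: 127.0.0.1:-1
//              remotes [
//                      { urls: [ nats://one:p@127.0.0.1:7422, nats://one:p@127.0.0.1:7423, nats://one:p@127.0.0.1:7424 ] }
//                      { urls: [ nats://two:p@127.0.0.1:7432, nats://two:p@127.0.0.1:7433, nats://two:p@127.0.0.1:7434 ] }
//              ]
//      }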
6170 var lnTmpl = ` 6171 listen: 127.0.0.1:-1 6172 server_name: %s 6173 jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} 6174 6175 {{leaf}} 6176 6177 cluster { 6178 name: %s 6179 listen: 127.0.0.1:%d 6180 routes = [%s] 6181 } 6182 6183 accounts { $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] }} 6184 ` 6185 6186 var leafFrag = ` 6187 leaf { 6188 listen: 127.0.0.1:-1 6189 remotes [ 6190 { urls: [ %s ] } 6191 { urls: [ %s ] } 6192 ] 6193 }` 6194 6195 // We want to have two leaf node connections that join to the same local account on the leafnode servers, 6196 // but connect to different accounts in different clusters. 6197 c1 := sc.clusters[0] // Will connect to account ONE 6198 c2 := sc.clusters[1] // Will connect to account TWO 6199 6200 genLeafTmpl := func(tmpl string) string { 6201 t.Helper() 6202 6203 var ln1, ln2 []string 6204 for _, s := range c1.servers { 6205 if s.ClusterName() != c1.name { 6206 continue 6207 } 6208 ln := s.getOpts().LeafNode 6209 ln1 = append(ln1, fmt.Sprintf("nats://one:p@%s:%d", ln.Host, ln.Port)) 6210 } 6211 6212 for _, s := range c2.servers { 6213 if s.ClusterName() != c2.name { 6214 continue 6215 } 6216 ln := s.getOpts().LeafNode 6217 ln2 = append(ln2, fmt.Sprintf("nats://two:p@%s:%d", ln.Host, ln.Port)) 6218 } 6219 return strings.Replace(tmpl, "{{leaf}}", fmt.Sprintf(leafFrag, strings.Join(ln1, ", "), strings.Join(ln2, ", ")), 1) 6220 } 6221 6222 tmpl := strings.Replace(lnTmpl, "store_dir:", fmt.Sprintf(`domain: "%s", store_dir:`, "SA"), 1) 6223 tmpl = genLeafTmpl(tmpl) 6224 6225 ln := createJetStreamCluster(t, tmpl, "SA", "SA-", 3, 22280, false) 6226 ln.waitOnClusterReady() 6227 defer ln.shutdown() 6228 6229 for _, s := range ln.servers { 6230 checkLeafNodeConnectedCount(t, s, 2) 6231 } 6232 6233 // Now connect DQ subscribers to each cluster and they separate accounts, and make sure we get the right behavior, balanced between 6234 // them when requests originate from the leaf cluster. 6235 6236 // Create 5 clients for each cluster / account 6237 var c1c, c2c []*nats.Conn 6238 for i := 0; i < 5; i++ { 6239 nc1, _ := jsClientConnect(t, c1.randomServer(), nats.UserInfo("one", "p")) 6240 defer nc1.Close() 6241 c1c = append(c1c, nc1) 6242 nc2, _ := jsClientConnect(t, c2.randomServer(), nats.UserInfo("two", "p")) 6243 defer nc2.Close() 6244 c2c = append(c2c, nc2) 6245 } 6246 6247 createSubs := func(num int, conns []*nats.Conn) (subs []*nats.Subscription) { 6248 for i := 0; i < num; i++ { 6249 nc := conns[rand.Intn(len(conns))] 6250 sub, err := nc.QueueSubscribeSync("REQUEST", "MC") 6251 require_NoError(t, err) 6252 subs = append(subs, sub) 6253 nc.Flush() 6254 } 6255 // Let subs propagate. 6256 time.Sleep(100 * time.Millisecond) 6257 return subs 6258 } 6259 closeSubs := func(subs []*nats.Subscription) { 6260 for _, sub := range subs { 6261 sub.Unsubscribe() 6262 } 6263 } 6264 6265 // Simple test first. 6266 subs1 := createSubs(1, c1c) 6267 defer closeSubs(subs1) 6268 subs2 := createSubs(1, c2c) 6269 defer closeSubs(subs2) 6270 6271 sendRequests := func(num int) { 6272 t.Helper() 6273 // Now connect to the leaf cluster and send some requests. 
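// Note on the balance checks used below: both remotes bind the same queue
// group "MC" (one via account ONE, one via account TWO), so each request
// published from the leaf cluster should be delivered to exactly one member,
// split between the two hub clusters roughly in proportion to their member
// counts. checkBalanced further below allows a tolerance of total/10, e.g.
// for 1000 requests at a 10/90 split it accepts ~100 +/- 100 for subs1 and
// ~900 +/- 100 for subs2.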
6274 nc, _ := jsClientConnect(t, ln.randomServer()) 6275 defer nc.Close() 6276 6277 for i := 0; i < num; i++ { 6278 require_NoError(t, nc.Publish("REQUEST", []byte("HELP"))) 6279 } 6280 nc.Flush() 6281 } 6282 6283 pending := func(subs []*nats.Subscription) (total int) { 6284 t.Helper() 6285 for _, sub := range subs { 6286 n, _, err := sub.Pending() 6287 require_NoError(t, err) 6288 total += n 6289 } 6290 return total 6291 } 6292 6293 num := 1000 6294 checkAllReceived := func() error { 6295 total := pending(subs1) + pending(subs2) 6296 if total == num { 6297 return nil 6298 } 6299 return fmt.Errorf("Not all received: %d vs %d", total, num) 6300 } 6301 6302 checkBalanced := func(total, pc1, pc2 int) { 6303 t.Helper() 6304 tf := float64(total) 6305 e1 := tf * (float64(pc1) / 100.00) 6306 e2 := tf * (float64(pc2) / 100.00) 6307 delta := tf / 10 6308 p1 := float64(pending(subs1)) 6309 if p1 < e1-delta || p1 > e1+delta { 6310 t.Fatalf("Value out of range for subs1, expected %v got %v", e1, p1) 6311 } 6312 p2 := float64(pending(subs2)) 6313 if p2 < e2-delta || p2 > e2+delta { 6314 t.Fatalf("Value out of range for subs2, expected %v got %v", e2, p2) 6315 } 6316 } 6317 6318 // Now connect to the leaf cluster and send some requests. 6319 6320 // Simple 50/50 6321 sendRequests(num) 6322 checkFor(t, time.Second, 200*time.Millisecond, checkAllReceived) 6323 checkBalanced(num, 50, 50) 6324 6325 closeSubs(subs1) 6326 closeSubs(subs2) 6327 6328 // Now test unbalanced. 10/90 6329 subs1 = createSubs(1, c1c) 6330 defer closeSubs(subs1) 6331 subs2 = createSubs(9, c2c) 6332 defer closeSubs(subs2) 6333 6334 sendRequests(num) 6335 checkFor(t, time.Second, 200*time.Millisecond, checkAllReceived) 6336 checkBalanced(num, 10, 90) 6337 6338 // Now test draining the subs as we are sending from an initial balanced situation simulating a draining of a cluster. 6339 6340 closeSubs(subs1) 6341 closeSubs(subs2) 6342 subs1, subs2 = nil, nil 6343 6344 // These subs slightly different. 6345 var r1, r2 atomic.Uint64 6346 for i := 0; i < 20; i++ { 6347 nc := c1c[rand.Intn(len(c1c))] 6348 sub, err := nc.QueueSubscribe("REQUEST", "MC", func(m *nats.Msg) { r1.Add(1) }) 6349 require_NoError(t, err) 6350 subs1 = append(subs1, sub) 6351 nc.Flush() 6352 6353 nc = c2c[rand.Intn(len(c2c))] 6354 sub, err = nc.QueueSubscribe("REQUEST", "MC", func(m *nats.Msg) { r2.Add(1) }) 6355 require_NoError(t, err) 6356 subs2 = append(subs2, sub) 6357 nc.Flush() 6358 } 6359 defer closeSubs(subs1) 6360 defer closeSubs(subs2) 6361 6362 nc, _ := jsClientConnect(t, ln.randomServer()) 6363 defer nc.Close() 6364 6365 for i, dindex := 0, 1; i < num; i++ { 6366 require_NoError(t, nc.Publish("REQUEST", []byte("HELP"))) 6367 // Check if we have more to simulate draining. 6368 // Will drain within first ~100 requests using 20% rand test below. 6369 // Will leave 1 behind. 
6370 if dindex < len(subs1)-1 && rand.Intn(6) > 4 { 6371 sub := subs1[dindex] 6372 dindex++ 6373 sub.Drain() 6374 } 6375 } 6376 nc.Flush() 6377 6378 checkFor(t, time.Second, 200*time.Millisecond, func() error { 6379 total := int(r1.Load() + r2.Load()) 6380 if total == num { 6381 return nil 6382 } 6383 return fmt.Errorf("Not all received: %d vs %d", total, num) 6384 }) 6385 require_True(t, r2.Load() > r1.Load()) 6386 } 6387 6388 func TestLeafNodeWithWeightedDQRequestsToSuperClusterWithStreamImportAccounts(t *testing.T) { 6389 var tmpl = ` 6390 listen: 127.0.0.1:-1 6391 6392 server_name: %s 6393 jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} 6394 6395 leaf { listen: 127.0.0.1:-1 } 6396 6397 cluster { 6398 name: %s 6399 listen: 127.0.0.1:%d 6400 routes = [%s] 6401 } 6402 6403 accounts { 6404 EFG { 6405 users = [ { user: "efg", pass: "p" } ] 6406 jetstream: enabled 6407 imports [ 6408 { stream: { account: STL, subject: "REQUEST"} } 6409 { stream: { account: KSC, subject: "REQUEST"} } 6410 ] 6411 exports [ { stream: "RESPONSE" } ] 6412 } 6413 STL { 6414 users = [ { user: "stl", pass: "p" } ] 6415 exports [ { stream: "REQUEST" } ] 6416 imports [ { stream: { account: EFG, subject: "RESPONSE"} } ] 6417 } 6418 KSC { 6419 users = [ { user: "ksc", pass: "p" } ] 6420 exports [ { stream: "REQUEST" } ] 6421 imports [ { stream: { account: EFG, subject: "RESPONSE"} } ] 6422 } 6423 $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } 6424 }` 6425 6426 sc := createJetStreamSuperClusterWithTemplate(t, tmpl, 5, 2) 6427 defer sc.shutdown() 6428 6429 // Now create a leafnode cluster that has 2 LNs, one to each cluster but on separate accounts, STL and KSC. 6430 var lnTmpl = ` 6431 listen: 127.0.0.1:-1 6432 server_name: %s 6433 jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} 6434 6435 {{leaf}} 6436 6437 cluster { 6438 name: %s 6439 listen: 127.0.0.1:%d 6440 routes = [%s] 6441 } 6442 6443 accounts { $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] }} 6444 ` 6445 6446 var leafFrag = ` 6447 leaf { 6448 listen: 127.0.0.1:-1 6449 remotes [ 6450 { urls: [ %s ] } 6451 { urls: [ %s ] } 6452 { urls: [ %s ] ; deny_export: [REQUEST, RESPONSE], deny_import: RESPONSE } 6453 ] 6454 }` 6455 6456 // We want to have two leaf node connections that join to the same local account on the leafnode servers, 6457 // but connect to different accounts in different clusters. 
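// For orientation, the hub-side account wiring defined in the template above is:
//
//      STL exports stream "REQUEST"  -> imported by EFG
//      KSC exports stream "REQUEST"  -> imported by EFG
//      EFG exports stream "RESPONSE" -> imported by STL and KSC
//
// so REQUEST traffic arriving in STL or KSC is visible to the EFG queue
// subscribers, and RESPONSE traffic published in EFG flows back to STL and KSC.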
6458 c1 := sc.clusters[0] // Will connect to account KSC 6459 c2 := sc.clusters[1] // Will connect to account STL 6460 6461 genLeafTmpl := func(tmpl string) string { 6462 t.Helper() 6463 6464 var ln1, ln2, ln3 []string 6465 for _, s := range c1.servers { 6466 if s.ClusterName() != c1.name { 6467 continue 6468 } 6469 ln := s.getOpts().LeafNode 6470 ln1 = append(ln1, fmt.Sprintf("nats://ksc:p@%s:%d", ln.Host, ln.Port)) 6471 } 6472 6473 for _, s := range c2.servers { 6474 if s.ClusterName() != c2.name { 6475 continue 6476 } 6477 ln := s.getOpts().LeafNode 6478 ln2 = append(ln2, fmt.Sprintf("nats://stl:p@%s:%d", ln.Host, ln.Port)) 6479 ln3 = append(ln3, fmt.Sprintf("nats://efg:p@%s:%d", ln.Host, ln.Port)) 6480 } 6481 return strings.Replace(tmpl, "{{leaf}}", fmt.Sprintf(leafFrag, strings.Join(ln1, ", "), strings.Join(ln2, ", "), strings.Join(ln3, ", ")), 1) 6482 } 6483 6484 tmpl = strings.Replace(lnTmpl, "store_dir:", fmt.Sprintf(`domain: "%s", store_dir:`, "SA"), 1) 6485 tmpl = genLeafTmpl(tmpl) 6486 6487 ln := createJetStreamCluster(t, tmpl, "SA", "SA-", 3, 22280, false) 6488 ln.waitOnClusterReady() 6489 defer ln.shutdown() 6490 6491 for _, s := range ln.servers { 6492 checkLeafNodeConnectedCount(t, s, 3) 6493 } 6494 6495 // Now connect DQ subscribers to each cluster but to the global account. 6496 6497 // Create 5 clients for each cluster / account 6498 var c1c, c2c []*nats.Conn 6499 for i := 0; i < 5; i++ { 6500 nc1, _ := jsClientConnect(t, c1.randomServer(), nats.UserInfo("efg", "p")) 6501 defer nc1.Close() 6502 c1c = append(c1c, nc1) 6503 nc2, _ := jsClientConnect(t, c2.randomServer(), nats.UserInfo("efg", "p")) 6504 defer nc2.Close() 6505 c2c = append(c2c, nc2) 6506 } 6507 6508 createSubs := func(num int, conns []*nats.Conn) (subs []*nats.Subscription) { 6509 for i := 0; i < num; i++ { 6510 nc := conns[rand.Intn(len(conns))] 6511 sub, err := nc.QueueSubscribeSync("REQUEST", "MC") 6512 require_NoError(t, err) 6513 subs = append(subs, sub) 6514 nc.Flush() 6515 } 6516 // Let subs propagate. 6517 time.Sleep(100 * time.Millisecond) 6518 return subs 6519 } 6520 closeSubs := func(subs []*nats.Subscription) { 6521 for _, sub := range subs { 6522 sub.Unsubscribe() 6523 } 6524 } 6525 6526 // Simple test first. 6527 subs1 := createSubs(1, c1c) 6528 defer closeSubs(subs1) 6529 subs2 := createSubs(1, c2c) 6530 defer closeSubs(subs2) 6531 6532 sendRequests := func(num int) { 6533 t.Helper() 6534 // Now connect to the leaf cluster and send some requests. 
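// Unlike the previous test, the queue members here all authenticate as "efg",
// i.e. they live in the single EFG account on the hub side. The intent, per
// the imports above and the deny_export list on the third remote, is that
// REQUEST traffic published from the leaf cluster reaches EFG via the stream
// imports from STL and KSC rather than directly over the EFG remote.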
6535 nc, _ := jsClientConnect(t, ln.randomServer()) 6536 defer nc.Close() 6537 6538 for i := 0; i < num; i++ { 6539 require_NoError(t, nc.Publish("REQUEST", []byte("HELP"))) 6540 } 6541 nc.Flush() 6542 } 6543 6544 pending := func(subs []*nats.Subscription) (total int) { 6545 t.Helper() 6546 for _, sub := range subs { 6547 n, _, err := sub.Pending() 6548 require_NoError(t, err) 6549 total += n 6550 } 6551 return total 6552 } 6553 6554 num := 1000 6555 checkAllReceived := func() error { 6556 total := pending(subs1) + pending(subs2) 6557 if total == num { 6558 return nil 6559 } 6560 return fmt.Errorf("Not all received: %d vs %d", total, num) 6561 } 6562 6563 checkBalanced := func(total, pc1, pc2 int) { 6564 t.Helper() 6565 tf := float64(total) 6566 e1 := tf * (float64(pc1) / 100.00) 6567 e2 := tf * (float64(pc2) / 100.00) 6568 delta := tf / 10 6569 p1 := float64(pending(subs1)) 6570 if p1 < e1-delta || p1 > e1+delta { 6571 t.Fatalf("Value out of range for subs1, expected %v got %v", e1, p1) 6572 } 6573 p2 := float64(pending(subs2)) 6574 if p2 < e2-delta || p2 > e2+delta { 6575 t.Fatalf("Value out of range for subs2, expected %v got %v", e2, p2) 6576 } 6577 } 6578 6579 // Now connect to the leaf cluster and send some requests. 6580 6581 // Simple 50/50 6582 sendRequests(num) 6583 checkFor(t, time.Second, 200*time.Millisecond, checkAllReceived) 6584 checkBalanced(num, 50, 50) 6585 6586 closeSubs(subs1) 6587 closeSubs(subs2) 6588 6589 // Now test unbalanced. 10/90 6590 subs1 = createSubs(1, c1c) 6591 defer closeSubs(subs1) 6592 subs2 = createSubs(9, c2c) 6593 defer closeSubs(subs2) 6594 6595 sendRequests(num) 6596 checkFor(t, time.Second, 200*time.Millisecond, checkAllReceived) 6597 checkBalanced(num, 10, 90) 6598 6599 closeSubs(subs1) 6600 closeSubs(subs2) 6601 6602 // Now test unbalanced. 80/20 6603 subs1 = createSubs(80, c1c) 6604 defer closeSubs(subs1) 6605 subs2 = createSubs(20, c2c) 6606 defer closeSubs(subs2) 6607 6608 sendRequests(num) 6609 checkFor(t, time.Second, 200*time.Millisecond, checkAllReceived) 6610 checkBalanced(num, 80, 20) 6611 6612 // Now test draining the subs as we are sending from an initial balanced situation simulating a draining of a cluster. 6613 6614 closeSubs(subs1) 6615 closeSubs(subs2) 6616 subs1, subs2 = nil, nil 6617 6618 // These subs slightly different. 6619 var r1, r2 atomic.Uint64 6620 for i := 0; i < 20; i++ { 6621 nc := c1c[rand.Intn(len(c1c))] 6622 sub, err := nc.QueueSubscribe("REQUEST", "MC", func(m *nats.Msg) { r1.Add(1) }) 6623 require_NoError(t, err) 6624 subs1 = append(subs1, sub) 6625 nc.Flush() 6626 6627 nc = c2c[rand.Intn(len(c2c))] 6628 sub, err = nc.QueueSubscribe("REQUEST", "MC", func(m *nats.Msg) { r2.Add(1) }) 6629 require_NoError(t, err) 6630 subs2 = append(subs2, sub) 6631 nc.Flush() 6632 } 6633 defer closeSubs(subs1) 6634 defer closeSubs(subs2) 6635 6636 nc, _ := jsClientConnect(t, ln.randomServer()) 6637 defer nc.Close() 6638 6639 for i, dindex := 0, 1; i < num; i++ { 6640 require_NoError(t, nc.Publish("REQUEST", []byte("HELP"))) 6641 // Check if we have more to simulate draining. 6642 // Will drain within first ~100 requests using 20% rand test below. 6643 // Will leave 1 behind. 
6644 if dindex < len(subs1)-1 && rand.Intn(6) > 4 { 6645 sub := subs1[dindex] 6646 dindex++ 6647 sub.Drain() 6648 } 6649 } 6650 nc.Flush() 6651 6652 checkFor(t, time.Second, 200*time.Millisecond, func() error { 6653 total := int(r1.Load() + r2.Load()) 6654 if total == num { 6655 return nil 6656 } 6657 return fmt.Errorf("Not all received: %d vs %d", total, num) 6658 }) 6659 require_True(t, r2.Load() > r1.Load()) 6660 6661 // Now check opposite flow for responses. 6662 6663 // Create 10 subscribers. 6664 var rsubs []*nats.Subscription 6665 6666 for i := 0; i < 10; i++ { 6667 nc, _ := jsClientConnect(t, ln.randomServer()) 6668 defer nc.Close() 6669 sub, err := nc.QueueSubscribeSync("RESPONSE", "SA") 6670 require_NoError(t, err) 6671 nc.Flush() 6672 rsubs = append(rsubs, sub) 6673 } 6674 6675 nc, _ = jsClientConnect(t, ln.randomServer()) 6676 defer nc.Close() 6677 _, err := nc.SubscribeSync("RESPONSE") 6678 require_NoError(t, err) 6679 nc.Flush() 6680 6681 // Now connect and send responses from EFG in cloud. 6682 nc, _ = jsClientConnect(t, sc.randomServer(), nats.UserInfo("efg", "p")) 6683 6684 for i := 0; i < 100; i++ { 6685 require_NoError(t, nc.Publish("RESPONSE", []byte("OK"))) 6686 } 6687 nc.Flush() 6688 6689 checkAllRespReceived := func() error { 6690 p := pending(rsubs) 6691 if p == 100 { 6692 return nil 6693 } 6694 return fmt.Errorf("Not all responses received: %d vs %d", p, 100) 6695 } 6696 6697 checkFor(t, time.Second, 200*time.Millisecond, checkAllRespReceived) 6698 } 6699 6700 func TestLeafNodeWithWeightedDQResponsesWithStreamImportAccountsWithUnsub(t *testing.T) { 6701 var tmpl = ` 6702 listen: 127.0.0.1:-1 6703 6704 server_name: %s 6705 jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} 6706 6707 leaf { listen: 127.0.0.1:-1 } 6708 6709 cluster { 6710 name: %s 6711 listen: 127.0.0.1:%d 6712 routes = [%s] 6713 } 6714 6715 accounts { 6716 EFG { 6717 users = [ { user: "efg", pass: "p" } ] 6718 jetstream: enabled 6719 exports [ { stream: "RESPONSE" } ] 6720 } 6721 STL { 6722 users = [ { user: "stl", pass: "p" } ] 6723 imports [ { stream: { account: EFG, subject: "RESPONSE"} } ] 6724 } 6725 KSC { 6726 users = [ { user: "ksc", pass: "p" } ] 6727 imports [ { stream: { account: EFG, subject: "RESPONSE"} } ] 6728 } 6729 $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } 6730 }` 6731 6732 c := createJetStreamClusterWithTemplate(t, tmpl, "US-CENTRAL", 3) 6733 defer c.shutdown() 6734 6735 // Now create a leafnode cluster that has 2 LNs, one to each cluster but on separate accounts, STL and KSC. 6736 var lnTmpl = ` 6737 listen: 127.0.0.1:-1 6738 server_name: %s 6739 jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} 6740 6741 {{leaf}} 6742 6743 cluster { 6744 name: %s 6745 listen: 127.0.0.1:%d 6746 routes = [%s] 6747 } 6748 6749 accounts { $SYS { users = [ { user: "admin", pass: "s3cr3t!" 
} ] }} 6750 ` 6751 6752 var leafFrag = ` 6753 leaf { 6754 listen: 127.0.0.1:-1 6755 remotes [ { urls: [ %s ] } ] 6756 }` 6757 6758 genLeafTmpl := func(tmpl string) string { 6759 t.Helper() 6760 6761 var ln []string 6762 for _, s := range c.servers { 6763 lno := s.getOpts().LeafNode 6764 ln = append(ln, fmt.Sprintf("nats://ksc:p@%s:%d", lno.Host, lno.Port)) 6765 } 6766 return strings.Replace(tmpl, "{{leaf}}", fmt.Sprintf(leafFrag, strings.Join(ln, ", ")), 1) 6767 } 6768 6769 tmpl = strings.Replace(lnTmpl, "store_dir:", fmt.Sprintf(`domain: "%s", store_dir:`, "SA"), 1) 6770 tmpl = genLeafTmpl(tmpl) 6771 6772 ln := createJetStreamCluster(t, tmpl, "SA", "SA-", 3, 22280, false) 6773 ln.waitOnClusterReady() 6774 defer ln.shutdown() 6775 6776 for _, s := range ln.servers { 6777 checkLeafNodeConnectedCount(t, s, 1) 6778 } 6779 6780 // Create 10 subscribers. 6781 var rsubs []*nats.Subscription 6782 6783 closeSubs := func(subs []*nats.Subscription) { 6784 for _, sub := range subs { 6785 sub.Unsubscribe() 6786 } 6787 } 6788 6789 checkAllRespReceived := func() error { 6790 t.Helper() 6791 var total int 6792 for _, sub := range rsubs { 6793 n, _, err := sub.Pending() 6794 require_NoError(t, err) 6795 total += n 6796 } 6797 if total == 100 { 6798 return nil 6799 } 6800 return fmt.Errorf("Not all responses received: %d vs %d", total, 100) 6801 } 6802 6803 s := ln.randomServer() 6804 for i := 0; i < 4; i++ { 6805 nc, _ := jsClientConnect(t, s) 6806 defer nc.Close() 6807 sub, err := nc.QueueSubscribeSync("RESPONSE", "SA") 6808 require_NoError(t, err) 6809 nc.Flush() 6810 rsubs = append(rsubs, sub) 6811 } 6812 6813 // Now connect and send responses from EFG in cloud. 6814 nc, _ := jsClientConnect(t, c.randomServer(), nats.UserInfo("efg", "p")) 6815 for i := 0; i < 100; i++ { 6816 require_NoError(t, nc.Publish("RESPONSE", []byte("OK"))) 6817 } 6818 nc.Flush() 6819 6820 // Make sure all received. 6821 checkFor(t, time.Second, 200*time.Millisecond, checkAllRespReceived) 6822 6823 checkAccountInterest := func(s *Server, accName string) *SublistResult { 6824 t.Helper() 6825 acc, err := s.LookupAccount(accName) 6826 require_NoError(t, err) 6827 acc.mu.RLock() 6828 r := acc.sl.Match("RESPONSE") 6829 acc.mu.RUnlock() 6830 return r 6831 } 6832 6833 checkInterest := func() error { 6834 t.Helper() 6835 for _, s := range c.servers { 6836 if r := checkAccountInterest(s, "KSC"); len(r.psubs)+len(r.qsubs) > 0 { 6837 return fmt.Errorf("Subs still present for %q: %+v", "KSC", r) 6838 } 6839 if r := checkAccountInterest(s, "EFG"); len(r.psubs)+len(r.qsubs) > 0 { 6840 return fmt.Errorf("Subs still present for %q: %+v", "EFG", r) 6841 } 6842 } 6843 return nil 6844 } 6845 6846 // Now unsub them and create new ones on a different server. 6847 closeSubs(rsubs) 6848 rsubs = rsubs[:0] 6849 6850 // Also restart the server that we had all the rsubs on. 6851 s.Shutdown() 6852 s.WaitForShutdown() 6853 s = ln.restartServer(s) 6854 ln.waitOnClusterReady() 6855 ln.waitOnServerCurrent(s) 6856 6857 checkFor(t, time.Second, 200*time.Millisecond, checkInterest) 6858 6859 for i := 0; i < 4; i++ { 6860 nc, _ := jsClientConnect(t, s) 6861 defer nc.Close() 6862 sub, err := nc.QueueSubscribeSync("RESPONSE", "SA") 6863 require_NoError(t, err) 6864 nc.Flush() 6865 rsubs = append(rsubs, sub) 6866 } 6867 6868 for i := 0; i < 100; i++ { 6869 require_NoError(t, nc.Publish("RESPONSE", []byte("OK"))) 6870 } 6871 nc.Flush() 6872 6873 // Make sure all received. 
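// checkFor, used throughout this file, is a polling helper defined elsewhere
// in the package; it is assumed to retry the supplied function until it
// returns nil or the total wait elapses, roughly like this sketch
// (checkForSketch is a hypothetical stand-in, not the real helper):
//
//      func checkForSketch(t *testing.T, totalWait, sleepDur time.Duration, f func() error) {
//              t.Helper()
//              deadline := time.Now().Add(totalWait)
//              var err error
//              for time.Now().Before(deadline) {
//                      if err = f(); err == nil {
//                              return
//                      }
//                      time.Sleep(sleepDur)
//              }
//              t.Fatalf("Condition not met: %v", err)
//      }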
6874 checkFor(t, time.Second, 200*time.Millisecond, checkAllRespReceived) 6875 6876 closeSubs(rsubs) 6877 checkFor(t, time.Second, 200*time.Millisecond, checkInterest) 6878 } 6879 6880 func TestLeafNodeTwoRemotesToSameHubAccount(t *testing.T) { 6881 conf1 := createConfFile(t, []byte(` 6882 port: -1 6883 server_name: "hub" 6884 accounts { 6885 HA { users: [{user: ha, password: pwd}] } 6886 } 6887 leafnodes { 6888 port: -1 6889 } 6890 `)) 6891 s1, o1 := RunServerWithConfig(conf1) 6892 defer s1.Shutdown() 6893 6894 conf2 := createConfFile(t, []byte(fmt.Sprintf(` 6895 port: -1 6896 server_name: "spoke" 6897 accounts { 6898 A { users: [{user: A, password: pwd}] } 6899 B { users: [{user: B, password: pwd}] } 6900 C { users: [{user: C, password: pwd}] } 6901 } 6902 leafnodes { 6903 remotes [ 6904 { 6905 url: "nats://ha:pwd@127.0.0.1:%d" 6906 local: "A" 6907 } 6908 { 6909 url: "nats://ha:pwd@127.0.0.1:%d" 6910 local: "C" 6911 } 6912 ] 6913 } 6914 `, o1.LeafNode.Port, o1.LeafNode.Port))) 6915 s2, _ := RunServerWithConfig(conf2) 6916 defer s2.Shutdown() 6917 6918 l := &captureErrorLogger{errCh: make(chan string, 10)} 6919 s2.SetLogger(l, false, false) 6920 6921 checkLeafNodeConnectedCount(t, s2, 2) 6922 6923 // Make sure we don't get duplicate leafnode connection errors 6924 deadline := time.NewTimer(1500 * time.Millisecond) 6925 for done := false; !done; { 6926 select { 6927 case err := <-l.errCh: 6928 if strings.Contains(err, DuplicateRemoteLeafnodeConnection.String()) { 6929 t.Fatalf("Got error: %v", err) 6930 } 6931 case <-deadline.C: 6932 done = true 6933 } 6934 } 6935 6936 nca := natsConnect(t, s2.ClientURL(), nats.UserInfo("A", "pwd")) 6937 defer nca.Close() 6938 suba := natsSubSync(t, nca, "A") 6939 ncb := natsConnect(t, s2.ClientURL(), nats.UserInfo("B", "pwd")) 6940 defer ncb.Close() 6941 subb := natsSubSync(t, ncb, "B") 6942 ncc := natsConnect(t, s2.ClientURL(), nats.UserInfo("C", "pwd")) 6943 defer ncc.Close() 6944 subc := natsSubSync(t, ncc, "C") 6945 subs := map[string]*nats.Subscription{"A": suba, "B": subb, "C": subc} 6946 6947 for _, subj := range []string{"A", "C"} { 6948 checkSubInterest(t, s1, "HA", subj, time.Second) 6949 } 6950 6951 nc := natsConnect(t, s1.ClientURL(), nats.UserInfo("ha", "pwd")) 6952 defer nc.Close() 6953 6954 for _, subj := range []string{"A", "B", "C"} { 6955 natsPub(t, nc, subj, []byte("hello")) 6956 } 6957 6958 for _, subj := range []string{"A", "B", "C"} { 6959 var expected bool 6960 if subj != "B" { 6961 expected = true 6962 } 6963 sub := subs[subj] 6964 if expected { 6965 natsNexMsg(t, sub, time.Second) 6966 } else { 6967 if _, err := sub.NextMsg(50 * time.Millisecond); err != nats.ErrTimeout { 6968 t.Fatalf("Expected timeout error, got %v", err) 6969 } 6970 } 6971 } 6972 } 6973 6974 func TestLeafNodeTwoRemotesToSameHubAccountWithClusters(t *testing.T) { 6975 hubTmpl := ` 6976 port: -1 6977 server_name: "%s" 6978 accounts { 6979 HA { users: [{user: HA, password: pwd}] } 6980 } 6981 cluster { 6982 name: "hub" 6983 port: -1 6984 %s 6985 } 6986 leafnodes { 6987 port: -1 6988 } 6989 ` 6990 confH1 := createConfFile(t, []byte(fmt.Sprintf(hubTmpl, "H1", _EMPTY_))) 6991 sh1, oh1 := RunServerWithConfig(confH1) 6992 defer sh1.Shutdown() 6993 6994 confH2 := createConfFile(t, []byte(fmt.Sprintf(hubTmpl, "H2", fmt.Sprintf("routes: [\"nats://127.0.0.1:%d\"]", oh1.Cluster.Port)))) 6995 sh2, oh2 := RunServerWithConfig(confH2) 6996 defer sh2.Shutdown() 6997 6998 checkClusterFormed(t, sh1, sh2) 6999 7000 spokeTmpl := ` 7001 port: -1 7002 server_name: "%s" 7003 accounts { 
7004 A { users: [{user: A, password: pwd}] } 7005 B { users: [{user: B, password: pwd}] } 7006 } 7007 cluster { 7008 name: "spoke" 7009 port: -1 7010 %s 7011 } 7012 leafnodes { 7013 remotes [ 7014 { 7015 url: "nats://HA:pwd@127.0.0.1:%d" 7016 local: "A" 7017 } 7018 { 7019 url: "nats://HA:pwd@127.0.0.1:%d" 7020 local: "B" 7021 } 7022 ] 7023 } 7024 ` 7025 for _, test := range []struct { 7026 name string 7027 sp2Leafport int 7028 }{ 7029 {"connect to different hub servers", oh2.LeafNode.Port}, 7030 {"connect to same hub server", oh1.LeafNode.Port}, 7031 } { 7032 t.Run(test.name, func(t *testing.T) { 7033 confSP1 := createConfFile(t, []byte(fmt.Sprintf(spokeTmpl, "SP1", _EMPTY_, oh1.LeafNode.Port, oh1.LeafNode.Port))) 7034 sp1, osp1 := RunServerWithConfig(confSP1) 7035 defer sp1.Shutdown() 7036 7037 confSP2 := createConfFile(t, []byte(fmt.Sprintf(spokeTmpl, "SP2", 7038 fmt.Sprintf("routes: [\"nats://127.0.0.1:%d\"]", osp1.Cluster.Port), test.sp2Leafport, test.sp2Leafport))) 7039 sp2, _ := RunServerWithConfig(confSP2) 7040 defer sp2.Shutdown() 7041 7042 checkClusterFormed(t, sp1, sp2) 7043 checkLeafNodeConnectedCount(t, sp1, 2) 7044 checkLeafNodeConnectedCount(t, sp2, 2) 7045 7046 var conns []*nats.Conn 7047 createConn := func(s *Server, user string) { 7048 t.Helper() 7049 nc := natsConnect(t, s.ClientURL(), nats.UserInfo(user, "pwd")) 7050 conns = append(conns, nc) 7051 } 7052 for _, nc := range conns { 7053 defer nc.Close() 7054 } 7055 createConn(sh1, "HA") 7056 createConn(sh2, "HA") 7057 createConn(sp1, "A") 7058 createConn(sp2, "A") 7059 createConn(sp1, "B") 7060 createConn(sp2, "B") 7061 7062 check := func(subConn *nats.Conn, subj string, checkA, checkB bool) { 7063 t.Helper() 7064 sub := natsSubSync(t, subConn, subj) 7065 defer sub.Unsubscribe() 7066 7067 checkSubInterest(t, sh1, "HA", subj, time.Second) 7068 checkSubInterest(t, sh2, "HA", subj, time.Second) 7069 if checkA { 7070 checkSubInterest(t, sp1, "A", subj, time.Second) 7071 checkSubInterest(t, sp2, "A", subj, time.Second) 7072 } 7073 if checkB { 7074 checkSubInterest(t, sp1, "B", subj, time.Second) 7075 checkSubInterest(t, sp2, "B", subj, time.Second) 7076 } 7077 7078 for i, ncp := range conns { 7079 // Don't publish from account "A" connections if we are 7080 // dealing with account "B", and vice-versa. 
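// For reference, the conns slice built above is laid out as follows, which is
// what the index checks below rely on:
//
//      conns[0], conns[1] -> user "HA" on hub servers sh1 and sh2
//      conns[2], conns[3] -> user "A"  on spoke servers sp1 and sp2
//      conns[4], conns[5] -> user "B"  on spoke servers sp1 and sp2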
7081 if !checkA && i >= 2 && i <= 3 { 7082 continue 7083 } 7084 if !checkB && i >= 4 { 7085 continue 7086 } 7087 natsPub(t, ncp, subj, []byte("hello")) 7088 natsNexMsg(t, sub, time.Second) 7089 // Make sure we don't get a duplicate 7090 if msg, err := sub.NextMsg(50 * time.Millisecond); err != nats.ErrTimeout { 7091 t.Fatalf("Unexpected message or error: msg=%v - err=%v", msg, err) 7092 } 7093 } 7094 } 7095 check(conns[0], "HA.1", true, true) 7096 check(conns[1], "HA.2", true, true) 7097 check(conns[2], "SPA.1", true, false) 7098 check(conns[3], "SPA.2", true, false) 7099 check(conns[4], "SPB.1", false, true) 7100 check(conns[5], "SPB.2", false, true) 7101 }) 7102 } 7103 } 7104 7105 func TestLeafNodeSameLocalAccountToMultipleHubs(t *testing.T) { 7106 hub1Conf := createConfFile(t, []byte(` 7107 port: -1 7108 server_name: hub1 7109 accounts { 7110 hub1 { users: [{user: hub1, password: pwd}] } 7111 } 7112 leafnodes { 7113 port: -1 7114 } 7115 `)) 7116 sh1, oh1 := RunServerWithConfig(hub1Conf) 7117 defer sh1.Shutdown() 7118 7119 hub2Conf := createConfFile(t, []byte(` 7120 port: -1 7121 server_name: hub2 7122 accounts { 7123 hub2 { users: [{user: hub2, password: pwd}] } 7124 } 7125 leafnodes { 7126 port: -1 7127 } 7128 `)) 7129 sh2, oh2 := RunServerWithConfig(hub2Conf) 7130 defer sh2.Shutdown() 7131 7132 lconf := createConfFile(t, []byte(fmt.Sprintf(` 7133 port: -1 7134 server_name: leaf 7135 accounts { 7136 A { users: [{user: A, password: pwd}] } 7137 B { users: [{user: B, password: pwd}] } 7138 C { users: [{user: C, password: pwd}] } 7139 } 7140 leafnodes { 7141 port: -1 7142 remotes [ 7143 { 7144 url: nats://hub1:pwd@127.0.0.1:%[1]d 7145 local: "A" 7146 } 7147 { 7148 url: nats://hub1:pwd@127.0.0.1:%[1]d 7149 local: "C" 7150 } 7151 { 7152 url: nats://hub2:pwd@127.0.0.1:%[2]d 7153 local: "A" 7154 } 7155 { 7156 url: nats://hub2:pwd@127.0.0.1:%[2]d 7157 local: "B" 7158 } 7159 ] 7160 } 7161 `, oh1.LeafNode.Port, oh2.LeafNode.Port))) 7162 s, _ := RunServerWithConfig(lconf) 7163 defer s.Shutdown() 7164 7165 // The leafnode to hub1 should have 2 connections (A and C) 7166 // while the one to hub2 should have 2 connections (A and B) 7167 checkLeafNodeConnectedCount(t, sh1, 2) 7168 checkLeafNodeConnectedCount(t, sh2, 2) 7169 checkLeafNodeConnectedCount(t, s, 4) 7170 7171 nca := natsConnect(t, s.ClientURL(), nats.UserInfo("A", "pwd")) 7172 defer nca.Close() 7173 suba := natsSubSync(t, nca, "A") 7174 ncb := natsConnect(t, s.ClientURL(), nats.UserInfo("B", "pwd")) 7175 defer ncb.Close() 7176 subb := natsSubSync(t, ncb, "B") 7177 ncc := natsConnect(t, s.ClientURL(), nats.UserInfo("C", "pwd")) 7178 defer ncc.Close() 7179 subc := natsSubSync(t, ncc, "C") 7180 7181 checkSubInterest(t, sh1, "hub1", "A", time.Second) 7182 checkSubNoInterest(t, sh1, "hub1", "B", time.Second) 7183 checkSubInterest(t, sh1, "hub1", "C", time.Second) 7184 7185 checkSubInterest(t, sh2, "hub2", "A", time.Second) 7186 checkSubInterest(t, sh2, "hub2", "B", time.Second) 7187 checkSubNoInterest(t, sh2, "hub2", "C", time.Second) 7188 7189 nch1 := natsConnect(t, sh1.ClientURL(), nats.UserInfo("hub1", "pwd")) 7190 defer nch1.Close() 7191 nch2 := natsConnect(t, sh2.ClientURL(), nats.UserInfo("hub2", "pwd")) 7192 defer nch2.Close() 7193 7194 checkNoMsg := func(sub *nats.Subscription) { 7195 t.Helper() 7196 if msg, err := sub.NextMsg(50 * time.Millisecond); err != nats.ErrTimeout { 7197 t.Fatalf("Unexpected message: %s", msg.Data) 7198 } 7199 } 7200 7201 checkSub := func(sub *nats.Subscription, subj, payload string) { 7202 t.Helper() 7203 msg 
:= natsNexMsg(t, sub, time.Second) 7204 require_Equal(t, subj, msg.Subject) 7205 require_Equal(t, payload, string(msg.Data)) 7206 // Make sure we don't get duplicates 7207 checkNoMsg(sub) 7208 } 7209 7210 natsPub(t, nch1, "A", []byte("msgA1")) 7211 checkSub(suba, "A", "msgA1") 7212 natsPub(t, nch1, "B", []byte("msgB1")) 7213 checkNoMsg(subb) 7214 natsPub(t, nch1, "C", []byte("msgC1")) 7215 checkSub(subc, "C", "msgC1") 7216 7217 natsPub(t, nch2, "A", []byte("msgA2")) 7218 checkSub(suba, "A", "msgA2") 7219 natsPub(t, nch2, "B", []byte("msgB2")) 7220 checkSub(subb, "B", "msgB2") 7221 natsPub(t, nch2, "C", []byte("msgC2")) 7222 checkNoMsg(subc) 7223 } 7224 7225 func TestLeafNodeSlowConsumer(t *testing.T) { 7226 ao := DefaultOptions() 7227 ao.LeafNode.Host = "127.0.0.1" 7228 ao.LeafNode.Port = -1 7229 ao.WriteDeadline = 1 * time.Millisecond 7230 a := RunServer(ao) 7231 defer a.Shutdown() 7232 7233 c, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", ao.LeafNode.Port)) 7234 if err != nil { 7235 t.Fatalf("Error connecting: %v", err) 7236 } 7237 time.Sleep(5 * time.Millisecond) 7238 a.mu.Lock() 7239 checkFor(t, 2*time.Second, 15*time.Millisecond, func() error { 7240 a.grMu.Lock() 7241 defer a.grMu.Unlock() 7242 for _, cli := range a.grTmpClients { 7243 cli.out.wdl = time.Nanosecond 7244 return nil 7245 } 7246 return nil 7247 }) 7248 a.mu.Unlock() 7249 7250 // Only leafnode slow consumers that made it past connect are tracked 7251 // in the slow consumers counter. 7252 if _, err := c.Write([]byte("CONNECT {}\r\n")); err != nil { 7253 t.Fatalf("Error writing connect: %v", err) 7254 } 7255 // Read info 7256 br := bufio.NewReader(c) 7257 br.ReadLine() 7258 for i := 0; i < 10; i++ { 7259 if _, err := c.Write([]byte("PING\r\n")); err != nil { 7260 t.Fatalf("Unexpected error writing PING: %v", err) 7261 } 7262 } 7263 defer c.Close() 7264 timeout := time.Now().Add(time.Second) 7265 var ( 7266 got uint64 7267 expected uint64 = 1 7268 ) 7269 for time.Now().Before(timeout) { 7270 got = a.NumSlowConsumersLeafs() 7271 if got == expected { 7272 return 7273 } 7274 time.Sleep(1 * time.Millisecond) 7275 } 7276 t.Fatalf("Timed out waiting for slow consumer leafnodes, got: %v, expected: %v", got, expected) 7277 } 7278 7279 // https://github.com/nats-io/nats-server/issues/4367 7280 func TestLeafNodeDQMultiAccountExportImport(t *testing.T) { 7281 bConf := createConfFile(t, []byte(` 7282 listen: 127.0.0.1:-1 7283 server_name: cluster-b-0 7284 accounts { 7285 $SYS: { users: [ { user: admin, password: pwd } ] }, 7286 AGG: { 7287 exports: [ { service: "PING.>" } ] 7288 users: [ { user: agg, password: agg } ] 7289 } 7290 } 7291 leaf { listen: 127.0.0.1:-1 } 7292 `)) 7293 7294 sb, ob := RunServerWithConfig(bConf) 7295 defer sb.Shutdown() 7296 7297 tmpl := ` 7298 listen: 127.0.0.1:-1 7299 server_name: %s 7300 jetstream: { store_dir: '%s' } 7301 cluster { 7302 name: %s 7303 listen: 127.0.0.1:%d 7304 routes = [%s] 7305 } 7306 accounts { 7307 $SYS: { users: [ { user: admin, password: pwd } ] }, 7308 A: { 7309 mappings: { "A.>" : ">" } 7310 exports: [ { service: A.> } ] 7311 users: [ { user: a, password: a } ] 7312 }, 7313 AGG: { 7314 imports: [ { service: { subject: A.>, account: A } } ] 7315 users: [ { user: agg, password: agg } ] 7316 }, 7317 } 7318 leaf { 7319 remotes: [ { 7320 urls: [ nats-leaf://agg:agg@127.0.0.1:{LEAF_PORT} ] 7321 account: AGG 7322 } ] 7323 } 7324 ` 7325 tmpl = strings.Replace(tmpl, "{LEAF_PORT}", fmt.Sprintf("%d", ob.LeafNode.Port), 1) 7326 c := createJetStreamCluster(t, tmpl, "cluster-a", "cluster-a-", 
3, 22110, false) 7327 defer c.shutdown() 7328 7329 // Make sure all servers are connected via leafnode to the hub, the b server. 7330 for _, s := range c.servers { 7331 checkLeafNodeConnectedCount(t, s, 1) 7332 } 7333 7334 // Connect to a server in the cluster and create a DQ listener. 7335 nc, _ := jsClientConnect(t, c.randomServer(), nats.UserInfo("a", "a")) 7336 defer nc.Close() 7337 7338 var got atomic.Int32 7339 7340 natsQueueSub(t, nc, "PING", "Q", func(m *nats.Msg) { 7341 got.Add(1) 7342 m.Respond([]byte("REPLY")) 7343 }) 7344 7345 // Now connect to B and send the request. 7346 ncb, _ := jsClientConnect(t, sb, nats.UserInfo("agg", "agg")) 7347 defer ncb.Close() 7348 7349 _, err := ncb.Request("A.PING", []byte("REQUEST"), time.Second) 7350 require_NoError(t, err) 7351 require_Equal(t, got.Load(), 1) 7352 } 7353 7354 // https://github.com/nats-io/nats-server/issues/4934 7355 func TestLeafNodeServerReloadSubjectMappings(t *testing.T) { 7356 stmpl := ` 7357 listen: 127.0.0.1:-1 7358 server_name: test-server 7359 mappings = { "source1": "target" } 7360 leaf { listen: 127.0.0.1:-1 } 7361 ` 7362 conf := createConfFile(t, []byte(stmpl)) 7363 s, o := RunServerWithConfig(conf) 7364 defer s.Shutdown() 7365 7366 tmpl := ` 7367 listen: 127.0.0.1:-1 7368 server_name: test-leaf 7369 leaf { 7370 remotes: [ { 7371 urls: [ nats-leaf://127.0.0.1:{LEAF_PORT} ] 7372 } ] 7373 } 7374 ` 7375 tmpl = strings.Replace(tmpl, "{LEAF_PORT}", fmt.Sprintf("%d", o.LeafNode.Port), 1) 7376 lConf := createConfFile(t, []byte(tmpl)) 7377 l, _ := RunServerWithConfig(lConf) 7378 defer l.Shutdown() 7379 7380 checkLeafNodeConnected(t, l) 7381 7382 // Create our subscriber. 7383 nc := natsConnect(t, s.ClientURL()) 7384 defer nc.Close() 7385 sub := natsSubSync(t, nc, "target") 7386 natsFlush(t, nc) 7387 7388 // Create our publisher. 7389 ncl := natsConnect(t, l.ClientURL()) 7390 defer ncl.Close() 7391 // Publish our message. 7392 ncl.Publish("source1", []byte("OK")) 7393 7394 // Make sure we receive it. 7395 checkSubsPending(t, sub, 1) 7396 7397 // Now change mapping. 7398 reloadUpdateConfig(t, s, conf, strings.Replace(stmpl, "source1", "source2", 1)) 7399 // Also make sure we do not have subscription interest for source1 on leaf anymore. 7400 checkSubInterest(t, l, globalAccountName, "source2", 2*time.Second) 7401 7402 // Publish our new message. 7403 ncl.Publish("source2", []byte("OK")) 7404 7405 // Make sure we receive it. 7406 checkSubsPending(t, sub, 2) 7407 7408 // Also make sure we do not have subscription interest for source1 on leaf anymore. 7409 checkSubNoInterest(t, l, globalAccountName, "source1", 2*time.Second) 7410 } 7411 7412 // https://github.com/nats-io/nats-server/issues/5099 7413 func TestLeafNodeServerReloadSubjectMappingsWithSameSubject(t *testing.T) { 7414 stmpl := ` 7415 listen: 127.0.0.1:-1 7416 server_name: test-server 7417 mappings = { "source": "target1" } 7418 leaf { listen: 127.0.0.1:-1 } 7419 ` 7420 conf := createConfFile(t, []byte(stmpl)) 7421 s, o := RunServerWithConfig(conf) 7422 defer s.Shutdown() 7423 7424 tmpl := ` 7425 listen: 127.0.0.1:-1 7426 server_name: test-leaf 7427 leaf { 7428 remotes: [ { urls: [ nats-leaf://127.0.0.1:{LEAF_PORT} ] } ] 7429 } 7430 ` 7431 tmpl = strings.Replace(tmpl, "{LEAF_PORT}", fmt.Sprintf("%d", o.LeafNode.Port), 1) 7432 lConf := createConfFile(t, []byte(tmpl)) 7433 l, _ := RunServerWithConfig(lConf) 7434 defer l.Shutdown() 7435 7436 checkLeafNodeConnected(t, l) 7437 7438 // Create our subscriber. 
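// In this variant only the mapping target changes across the reload
// ("source" -> "target1" becomes "source" -> "target2"), so the leaf is
// expected to keep both its connection and its interest in "source"; the two
// sync subscribers created below tell us which target actually received each
// publish before and after the reload.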
7439 nc := natsConnect(t, s.ClientURL()) 7440 defer nc.Close() 7441 sub1 := natsSubSync(t, nc, "target1") 7442 sub2 := natsSubSync(t, nc, "target2") 7443 natsFlush(t, nc) 7444 7445 // Create our publisher. 7446 ncl := natsConnect(t, l.ClientURL()) 7447 defer ncl.Close() 7448 // Publish our message. 7449 ncl.Publish("source", []byte("OK")) 7450 7451 // Make sure we receive it. 7452 checkSubsPending(t, sub1, 1) 7453 // Make sure the other does not. 7454 checkSubsPending(t, sub2, 0) 7455 7456 // Now change mapping, but only the "to" subject, keeping same "from" 7457 reloadUpdateConfig(t, s, conf, strings.Replace(stmpl, "target1", "target2", 1)) 7458 checkLeafNodeConnected(t, l) 7459 7460 // Publish our new message. 7461 ncl.Publish("source", []byte("OK")) 7462 7463 // Make sure we receive it. 7464 checkSubsPending(t, sub2, 1) 7465 // Make sure the other does not. 7466 checkSubsPending(t, sub1, 1) 7467 } 7468 7469 func TestLeafNodeNkeyAuth(t *testing.T) { 7470 conf := createConfFile(t, []byte(` 7471 listen: 127.0.0.1:-1 7472 server_name: test-server 7473 leaf { 7474 listen: 127.0.0.1:-1 7475 authorization: { nkey: UCSTG5CRF5GEJERAFKUUYRODGABTBVWY2NPE4GGKRQVQOH74PIAKTVKO } 7476 } 7477 `)) 7478 s, o := RunServerWithConfig(conf) 7479 defer s.Shutdown() 7480 7481 tmpl := ` 7482 listen: 127.0.0.1:-1 7483 server_name: test-leaf 7484 leaf { 7485 remotes: [ { 7486 url: nats-leaf://127.0.0.1:{LEAF_PORT} 7487 seed: SUACJN3OSKWWPQXME4JUNFJ3PARXPO657GGNWNU7PK7G3AUQQYHLW26XH4 7488 } ] 7489 } 7490 ` 7491 tmpl = strings.Replace(tmpl, "{LEAF_PORT}", fmt.Sprintf("%d", o.LeafNode.Port), 1) 7492 lConf := createConfFile(t, []byte(tmpl)) 7493 l, _ := RunServerWithConfig(lConf) 7494 defer l.Shutdown() 7495 7496 checkLeafNodeConnected(t, l) 7497 } 7498 7499 func TestLeafNodeAccountNkeysAuth(t *testing.T) { 7500 conf := createConfFile(t, []byte(` 7501 listen: 127.0.0.1:-1 7502 server_name: test-server 7503 leaf { 7504 listen: 127.0.0.1:-1 7505 } 7506 accounts { 7507 A { users [ {nkey: UCSTG5CRF5GEJERAFKUUYRODGABTBVWY2NPE4GGKRQVQOH74PIAKTVKO } ] } 7508 } 7509 `)) 7510 s, o := RunServerWithConfig(conf) 7511 defer s.Shutdown() 7512 7513 tmpl := ` 7514 listen: 127.0.0.1:-1 7515 server_name: test-leaf 7516 leaf { 7517 remotes: [ { 7518 url: nats-leaf://127.0.0.1:{LEAF_PORT} 7519 seed: SUACJN3OSKWWPQXME4JUNFJ3PARXPO657GGNWNU7PK7G3AUQQYHLW26XH4 7520 } ] 7521 } 7522 ` 7523 tmpl = strings.Replace(tmpl, "{LEAF_PORT}", fmt.Sprintf("%d", o.LeafNode.Port), 1) 7524 lConf := createConfFile(t, []byte(tmpl)) 7525 l, _ := RunServerWithConfig(lConf) 7526 defer l.Shutdown() 7527 7528 checkLeafNodeConnected(t, l) 7529 } 7530 7531 // https://github.com/nats-io/nats-server/issues/5117 7532 func TestLeafNodeLoopDetectionOnActualLoop(t *testing.T) { 7533 // Setup: B --[leaf]--> A C --[leaf]--> A C --[leaf] --> B 7534 accConf := ` 7535 accounts: { 7536 APP: { 7537 users: [ { user:u, password: u, 7538 permissions: { publish = "u.>", subscribe = "u.>" }} ] 7539 } 7540 $SYS: { users = [ {user: "s", password: "s"} ] } 7541 }` 7542 7543 confA := createConfFile(t, []byte(fmt.Sprintf(` 7544 server_name: a1 7545 port: -1 7546 cluster: { name: A } 7547 leafnodes { 7548 port: 17422 7549 } 7550 %s`, accConf))) 7551 7552 confB := createConfFile(t, []byte(fmt.Sprintf(` 7553 server_name: b1 7554 port: -1 7555 cluster: { name: B } 7556 leafnodes { 7557 port: 17432 7558 remotes [ 7559 { urls: ["nats-leaf://u:u@localhost:17422"], account: "APP" } 7560 ] 7561 reconnect: "2s" 7562 } 7563 %s`, accConf))) 7564 7565 confC := createConfFile(t, []byte(fmt.Sprintf(` 
7566 server_name: c1 7567 port: -1 7568 cluster: { name: C } 7569 leafnodes { 7570 port: 17442 7571 remotes [ 7572 { urls: ["nats-leaf://u:u@localhost:17422"], account: "APP" } 7573 # This one creates the loop 7574 { urls: ["nats-leaf://u:u@localhost:17432"], account: "APP" } 7575 ] 7576 reconnect: "0.5s" 7577 } 7578 %s`, accConf))) 7579 7580 // Start order will be B -> C -> A 7581 // We will force C to connect to A first before B using different reconnect intervals. 7582 // If B connects first we detect loops fine. If C connects first we do not. 7583 7584 srvB, _ := RunServerWithConfig(confB) 7585 defer srvB.Shutdown() 7586 lb := &loopDetectedLogger{ch: make(chan string, 1)} 7587 srvB.SetLogger(lb, false, false) 7588 7589 srvC, _ := RunServerWithConfig(confC) 7590 defer srvC.Shutdown() 7591 lc := &loopDetectedLogger{ch: make(chan string, 1)} 7592 srvC.SetLogger(lc, false, false) 7593 7594 // C should connect to B 7595 checkLeafNodeConnectedCount(t, srvC, 1) 7596 7597 srvA, _ := RunServerWithConfig(confA) 7598 defer srvA.Shutdown() 7599 la := &loopDetectedLogger{ch: make(chan string, 1)} 7600 srvA.SetLogger(la, false, false) 7601 7602 select { 7603 case <-la.ch: 7604 case <-lb.ch: 7605 case <-lc.ch: 7606 case <-time.After(5 * time.Second): 7607 t.Fatalf("Did not get any error regarding loop") 7608 } 7609 }
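// For orientation, the leafnode topology built in the test above is:
//
//      B --leaf--> A
//      C --leaf--> A
//      C --leaf--> B
//
// Once A is up, the APP account's interest can travel around the
// C -> B -> A -> C cycle, which is the loop that one of the three
// loopDetectedLogger instances is expected to report within the 5 second window.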