// Copyright 2017-2022 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package server

import (
	"bytes"
	"crypto/tls"
	"encoding/base64"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/nats-io/jwt/v2"
	"github.com/nats-io/nats.go"
	"github.com/nats-io/nkeys"
)

func newServerWithConfig(t *testing.T, configFile string) (*Server, *Options, string) {
	t.Helper()
	content, err := os.ReadFile(configFile)
	if err != nil {
		t.Fatalf("Error loading file: %v", err)
	}
	return newServerWithContent(t, content)
}

func newServerWithContent(t *testing.T, content []byte) (*Server, *Options, string) {
	t.Helper()
	opts, tmpFile := newOptionsFromContent(t, content)
	return New(opts), opts, tmpFile
}

func newOptionsFromContent(t *testing.T, content []byte) (*Options, string) {
	t.Helper()
	tmpFile := createConfFile(t, content)
	opts, err := ProcessConfigFile(tmpFile)
	if err != nil {
		t.Fatalf("Error processing config file: %v", err)
	}
	opts.NoSigs = true
	return opts, tmpFile
}

func createConfFile(t testing.TB, content []byte) string {
	t.Helper()
	conf := createTempFile(t, _EMPTY_)
	fName := conf.Name()
	conf.Close()
	if err := os.WriteFile(fName, content, 0666); err != nil {
		t.Fatalf("Error writing conf file: %v", err)
	}
	return fName
}

func runReloadServerWithConfig(t *testing.T, configFile string) (*Server, *Options, string) {
	t.Helper()
	content, err := os.ReadFile(configFile)
	if err != nil {
		t.Fatalf("Error loading file: %v", err)
	}
	return runReloadServerWithContent(t, content)
}

func runReloadServerWithContent(t *testing.T, content []byte) (*Server, *Options, string) {
	t.Helper()
	opts, tmpFile := newOptionsFromContent(t, content)
	opts.NoLog = true
	opts.NoSigs = true
	s := RunServer(opts)
	return s, opts, tmpFile
}

func changeCurrentConfigContent(t *testing.T, curConfig, newConfig string) {
	t.Helper()
	content, err := os.ReadFile(newConfig)
	if err != nil {
		t.Fatalf("Error loading file: %v", err)
	}
	changeCurrentConfigContentWithNewContent(t, curConfig, content)
}

func changeCurrentConfigContentWithNewContent(t *testing.T, curConfig string, content []byte) {
	t.Helper()
	if err := os.WriteFile(curConfig, content, 0666); err != nil {
		t.Fatalf("Error writing config: %v", err)
	}
}

// Ensure Reload returns an error when attempting to reload a server that did
// not start with a config file.
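// Reload re-reads the file referenced by opts.ConfigFile; a server built
// directly from an Options struct has nothing to re-read, so the call is
// expected to fail and ConfigTime should remain unchanged.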
func TestConfigReloadNoConfigFile(t *testing.T) {
	server := New(&Options{NoSigs: true})
	loaded := server.ConfigTime()
	if server.Reload() == nil {
		t.Fatal("Expected Reload to return an error")
	}
	if reloaded := server.ConfigTime(); reloaded != loaded {
		t.Fatalf("ConfigTime is incorrect.\nexpected: %s\ngot: %s", loaded, reloaded)
	}
}

// Ensure Reload returns an error when attempting to change an option which
// does not support reloading.
func TestConfigReloadUnsupported(t *testing.T) {
	server, _, config := newServerWithConfig(t, "./configs/reload/test.conf")
	defer server.Shutdown()

	loaded := server.ConfigTime()

	golden := &Options{
		ConfigFile:     config,
		Host:           "0.0.0.0",
		Port:           2233,
		AuthTimeout:    float64(AUTH_TIMEOUT / time.Second),
		Debug:          false,
		Trace:          false,
		Logtime:        false,
		MaxControlLine: 4096,
		MaxPayload:     1048576,
		MaxConn:        65536,
		PingInterval:   2 * time.Minute,
		MaxPingsOut:    2,
		WriteDeadline:  10 * time.Second,
		Cluster: ClusterOpts{
			Name: "abc",
			Host: "127.0.0.1",
			Port: -1,
		},
		NoSigs: true,
	}
	setBaselineOptions(golden)

	checkOptionsEqual(t, golden, server.getOpts())

	// Change config file to bad config.
	changeCurrentConfigContent(t, config, "./configs/reload/reload_unsupported.conf")

	// This should fail because `cluster` host cannot be changed.
	if err := server.Reload(); err == nil {
		t.Fatal("Expected Reload to return an error")
	}

	// Ensure config didn't change.
	checkOptionsEqual(t, golden, server.getOpts())

	if reloaded := server.ConfigTime(); reloaded != loaded {
		t.Fatalf("ConfigTime is incorrect.\nexpected: %s\ngot: %s", loaded, reloaded)
	}
}

// This checks that if we change an option that does not support hot-swapping
// we get an error. Using `listen` for now (test may need to be updated if
// server is changed to support change of listen spec).
func TestConfigReloadUnsupportedHotSwapping(t *testing.T) {
	server, _, config := newServerWithContent(t, []byte("listen: 127.0.0.1:-1"))
	defer server.Shutdown()

	loaded := server.ConfigTime()

	time.Sleep(time.Millisecond)

	// Change config file with unsupported option hot-swap
	changeCurrentConfigContentWithNewContent(t, config, []byte("listen: 127.0.0.1:9999"))

	// This should fail because `listen` host cannot be changed.
	if err := server.Reload(); err == nil || !strings.Contains(err.Error(), "not supported") {
		t.Fatalf("Expected Reload to return a not supported error, got %v", err)
	}

	if reloaded := server.ConfigTime(); reloaded != loaded {
		t.Fatalf("ConfigTime is incorrect.\nexpected: %s\ngot: %s", loaded, reloaded)
	}
}

// Ensure Reload returns an error when reloading from a bad config file.
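// A failed parse must leave the running configuration untouched, which the
// golden-options comparison and ConfigTime check below verify.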
func TestConfigReloadInvalidConfig(t *testing.T) {
	server, _, config := newServerWithConfig(t, "./configs/reload/test.conf")
	defer server.Shutdown()

	loaded := server.ConfigTime()

	golden := &Options{
		ConfigFile:     config,
		Host:           "0.0.0.0",
		Port:           2233,
		AuthTimeout:    float64(AUTH_TIMEOUT / time.Second),
		Debug:          false,
		Trace:          false,
		Logtime:        false,
		MaxControlLine: 4096,
		MaxPayload:     1048576,
		MaxConn:        65536,
		PingInterval:   2 * time.Minute,
		MaxPingsOut:    2,
		WriteDeadline:  10 * time.Second,
		Cluster: ClusterOpts{
			Name: "abc",
			Host: "127.0.0.1",
			Port: -1,
		},
		NoSigs: true,
	}
	setBaselineOptions(golden)

	checkOptionsEqual(t, golden, server.getOpts())

	// Change config file to bad config.
	changeCurrentConfigContent(t, config, "./configs/reload/invalid.conf")

	// This should fail because the new config should not parse.
	if err := server.Reload(); err == nil {
		t.Fatal("Expected Reload to return an error")
	}

	// Ensure config didn't change.
	checkOptionsEqual(t, golden, server.getOpts())

	if reloaded := server.ConfigTime(); reloaded != loaded {
		t.Fatalf("ConfigTime is incorrect.\nexpected: %s\ngot: %s", loaded, reloaded)
	}
}

// Ensure Reload returns nil and the config is changed on success.
func TestConfigReload(t *testing.T) {
	server, opts, config := runReloadServerWithConfig(t, "./configs/reload/test.conf")
	defer removeFile(t, "nats-server.pid")
	defer removeFile(t, "nats-server.log")
	defer server.Shutdown()

	var content []byte
	if runtime.GOOS != "windows" {
		content = []byte(`
		remote_syslog: "udp://127.0.0.1:514" # change on reload
		syslog: true # enable on reload
		`)
	}
	platformConf := filepath.Join(filepath.Dir(config), "platform.conf")
	if err := os.WriteFile(platformConf, content, 0666); err != nil {
		t.Fatalf("Unable to write config file: %v", err)
	}

	loaded := server.ConfigTime()

	golden := &Options{
		ConfigFile:     config,
		Host:           "0.0.0.0",
		Port:           2233,
		AuthTimeout:    float64(AUTH_TIMEOUT / time.Second),
		Debug:          false,
		Trace:          false,
		NoLog:          true,
		Logtime:        false,
		MaxControlLine: 4096,
		MaxPayload:     1048576,
		MaxConn:        65536,
		PingInterval:   2 * time.Minute,
		MaxPingsOut:    2,
		WriteDeadline:  10 * time.Second,
		Cluster: ClusterOpts{
			Name: "abc",
			Host: "127.0.0.1",
			Port: server.ClusterAddr().Port,
		},
		NoSigs: true,
	}
	setBaselineOptions(golden)

	checkOptionsEqual(t, golden, opts)

	// Change config file to new config.
	changeCurrentConfigContent(t, config, "./configs/reload/reload.conf")

	if err := server.Reload(); err != nil {
		t.Fatalf("Error reloading config: %v", err)
	}

	// Ensure config changed.
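	// reload.conf (plus platform.conf on non-Windows) is expected to turn on
	// debug, trace, logtime, logtime_utc and syslog, point the log and pid
	// files at nats-server.log/nats-server.pid, enable verified TLS, switch
	// to user/password authentication, and tighten the limits asserted below.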
	updated := server.getOpts()
	if !updated.Trace {
		t.Fatal("Expected Trace to be true")
	}
	if !updated.Debug {
		t.Fatal("Expected Debug to be true")
	}
	if !updated.Logtime {
		t.Fatal("Expected Logtime to be true")
	}
	if !updated.LogtimeUTC {
		t.Fatal("Expected LogtimeUTC to be true")
	}
	if runtime.GOOS != "windows" {
		if !updated.Syslog {
			t.Fatal("Expected Syslog to be true")
		}
		if updated.RemoteSyslog != "udp://127.0.0.1:514" {
			t.Fatalf("RemoteSyslog is incorrect.\nexpected: udp://127.0.0.1:514\ngot: %s", updated.RemoteSyslog)
		}
	}
	if updated.LogFile != "nats-server.log" {
		t.Fatalf("LogFile is incorrect.\nexpected: nats-server.log\ngot: %s", updated.LogFile)
	}
	if updated.TLSConfig == nil {
		t.Fatal("Expected TLSConfig to be non-nil")
	}
	if !server.info.TLSRequired {
		t.Fatal("Expected TLSRequired to be true")
	}
	if !server.info.TLSVerify {
		t.Fatal("Expected TLSVerify to be true")
	}
	if updated.Username != "tyler" {
		t.Fatalf("Username is incorrect.\nexpected: tyler\ngot: %s", updated.Username)
	}
	if updated.Password != "T0pS3cr3t" {
		t.Fatalf("Password is incorrect.\nexpected: T0pS3cr3t\ngot: %s", updated.Password)
	}
	if updated.AuthTimeout != 2 {
		t.Fatalf("AuthTimeout is incorrect.\nexpected: 2\ngot: %f", updated.AuthTimeout)
	}
	if !server.info.AuthRequired {
		t.Fatal("Expected AuthRequired to be true")
	}
	if !updated.Cluster.NoAdvertise {
		t.Fatal("Expected NoAdvertise to be true")
	}
	if updated.Cluster.PingInterval != 20*time.Second {
		t.Fatalf("Cluster PingInterval is incorrect.\nexpected: 20s\ngot: %v", updated.Cluster.PingInterval)
	}
	if updated.Cluster.MaxPingsOut != 8 {
		t.Fatalf("Cluster MaxPingsOut is incorrect.\nexpected: 8\ngot: %v", updated.Cluster.MaxPingsOut)
	}
	if updated.PidFile != "nats-server.pid" {
		t.Fatalf("PidFile is incorrect.\nexpected: nats-server.pid\ngot: %s", updated.PidFile)
	}
	if updated.MaxControlLine != 512 {
		t.Fatalf("MaxControlLine is incorrect.\nexpected: 512\ngot: %d", updated.MaxControlLine)
	}
	if updated.PingInterval != 5*time.Second {
		t.Fatalf("PingInterval is incorrect.\nexpected 5s\ngot: %s", updated.PingInterval)
	}
	if updated.MaxPingsOut != 1 {
		t.Fatalf("MaxPingsOut is incorrect.\nexpected 1\ngot: %d", updated.MaxPingsOut)
	}
	if updated.WriteDeadline != 3*time.Second {
		t.Fatalf("WriteDeadline is incorrect.\nexpected 3s\ngot: %s", updated.WriteDeadline)
	}
	if updated.MaxPayload != 1024 {
		t.Fatalf("MaxPayload is incorrect.\nexpected 1024\ngot: %d", updated.MaxPayload)
	}

	if reloaded := server.ConfigTime(); !reloaded.After(loaded) {
		t.Fatalf("ConfigTime is incorrect.\nexpected greater than: %s\ngot: %s", loaded, reloaded)
	}
}

// Ensure Reload supports TLS config changes. Test this by starting a server
// with TLS enabled, connect to it to verify, reload config using a different
// key pair and client verification enabled, ensure reconnect fails, then
// ensure reconnect succeeds when the client provides a cert.
func TestConfigReloadRotateTLS(t *testing.T) {
	server, opts, config := runReloadServerWithConfig(t, "./configs/reload/tls_test.conf")
	defer server.Shutdown()

	// Ensure we can connect as a sanity check.
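	// (tls_test.conf and tls_verify_test.conf are not shown here; per the doc
	// comment above, the second presumably swaps the key pair and adds
	// `verify: true` to the tls block.)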
	addr := fmt.Sprintf("nats://%s:%d", opts.Host, server.Addr().(*net.TCPAddr).Port)

	nc, err := nats.Connect(addr, nats.Secure(&tls.Config{InsecureSkipVerify: true}))
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	defer nc.Close()
	sub, err := nc.SubscribeSync("foo")
	if err != nil {
		t.Fatalf("Error subscribing: %v", err)
	}
	defer sub.Unsubscribe()

	// Rotate cert and enable client verification.
	changeCurrentConfigContent(t, config, "./configs/reload/tls_verify_test.conf")
	if err := server.Reload(); err != nil {
		t.Fatalf("Error reloading config: %v", err)
	}

	// Ensure connecting fails.
	if _, err := nats.Connect(addr, nats.Secure(&tls.Config{InsecureSkipVerify: true})); err == nil {
		t.Fatal("Expected connect to fail")
	}

	// Ensure connecting succeeds when client presents cert.
	cert := nats.ClientCert("./configs/certs/cert.new.pem", "./configs/certs/key.new.pem")
	conn, err := nats.Connect(addr, cert, nats.RootCAs("./configs/certs/cert.new.pem"))
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	conn.Close()

	// Ensure the original connection can still publish/receive.
	if err := nc.Publish("foo", []byte("hello")); err != nil {
		t.Fatalf("Error publishing: %v", err)
	}
	nc.Flush()
	msg, err := sub.NextMsg(2 * time.Second)
	if err != nil {
		t.Fatalf("Error receiving msg: %v", err)
	}
	if string(msg.Data) != "hello" {
		t.Fatalf("Msg is incorrect.\nexpected: %+v\ngot: %+v", []byte("hello"), msg.Data)
	}
}

// Ensure Reload supports enabling TLS. Test this by starting a server without
// TLS enabled, connect to it to verify, reload config with TLS enabled, ensure
// reconnect fails, then ensure reconnect succeeds when using secure.
func TestConfigReloadEnableTLS(t *testing.T) {
	server, opts, config := runReloadServerWithConfig(t, "./configs/reload/basic.conf")
	defer server.Shutdown()

	// Ensure we can connect as a sanity check.
	addr := fmt.Sprintf("nats://%s:%d", opts.Host, server.Addr().(*net.TCPAddr).Port)
	nc, err := nats.Connect(addr)
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	nc.Close()

	// Enable TLS.
	changeCurrentConfigContent(t, config, "./configs/reload/tls_test.conf")
	if err := server.Reload(); err != nil {
		t.Fatalf("Error reloading config: %v", err)
	}

	// Ensure connecting is OK (we need to skip server cert verification since
	// the library is not doing that by default now).
	nc, err = nats.Connect(addr, nats.Secure(&tls.Config{InsecureSkipVerify: true}))
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	nc.Close()
}

// Ensure Reload supports disabling TLS. Test this by starting a server with
// TLS enabled, connect to it to verify, reload config with TLS disabled,
// ensure reconnect fails, then ensure reconnect succeeds when connecting
// without secure.
func TestConfigReloadDisableTLS(t *testing.T) {
	server, opts, config := runReloadServerWithConfig(t, "./configs/reload/tls_test.conf")
	defer server.Shutdown()

	// Ensure we can connect as a sanity check.
	addr := fmt.Sprintf("nats://%s:%d", opts.Host, server.Addr().(*net.TCPAddr).Port)
	nc, err := nats.Connect(addr, nats.Secure(&tls.Config{InsecureSkipVerify: true}))
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	nc.Close()

	// Disable TLS.
	changeCurrentConfigContent(t, config, "./configs/reload/basic.conf")
	if err := server.Reload(); err != nil {
		t.Fatalf("Error reloading config: %v", err)
	}

	// Ensure connecting fails.
	if _, err := nats.Connect(addr, nats.Secure(&tls.Config{InsecureSkipVerify: true})); err == nil {
		t.Fatal("Expected connect to fail")
	}

	// Ensure connecting succeeds when not using secure.
	nc, err = nats.Connect(addr)
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	nc.Close()
}

func TestConfigReloadRotateTLSMultiCert(t *testing.T) {
	server, opts, config := runReloadServerWithConfig(t, "./configs/reload/tls_multi_cert_1.conf")
	defer server.Shutdown()

	// Ensure we can connect as a sanity check.
	addr := fmt.Sprintf("nats://%s:%d", opts.Host, server.Addr().(*net.TCPAddr).Port)

	rawCerts := make(chan []byte, 3)
	nc, err := nats.Connect(addr, nats.Secure(&tls.Config{
		VerifyConnection: func(s tls.ConnectionState) error {
			rawCerts <- s.PeerCertificates[0].Raw
			return nil
		},
		InsecureSkipVerify: true,
	}))
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	defer nc.Close()
	sub, err := nc.SubscribeSync("foo")
	if err != nil {
		t.Fatalf("Error subscribing: %v", err)
	}
	defer sub.Unsubscribe()

	// Rotate cert and enable client verification.
	changeCurrentConfigContent(t, config, "./configs/reload/tls_multi_cert_2.conf")
	if err := server.Reload(); err != nil {
		t.Fatalf("Error reloading config: %v", err)
	}

	// Ensure connecting fails.
	if _, err := nats.Connect(addr, nats.Secure(&tls.Config{InsecureSkipVerify: true})); err == nil {
		t.Fatal("Expected connect to fail")
	}

	// Ensure connecting succeeds when client presents cert.
	cert := nats.ClientCert("../test/configs/certs/client-cert.pem", "../test/configs/certs/client-key.pem")
	conn, err := nats.Connect(addr, cert, nats.RootCAs("../test/configs/certs/ca.pem"), nats.Secure(&tls.Config{
		VerifyConnection: func(s tls.ConnectionState) error {
			rawCerts <- s.PeerCertificates[0].Raw
			return nil
		},
	}))
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	conn.Close()

	// Ensure the original connection can still publish/receive.
	if err := nc.Publish("foo", []byte("hello")); err != nil {
		t.Fatalf("Error publishing: %v", err)
	}
	nc.Flush()
	msg, err := sub.NextMsg(2 * time.Second)
	if err != nil {
		t.Fatalf("Error receiving msg: %v", err)
	}
	if string(msg.Data) != "hello" {
		t.Fatalf("Msg is incorrect.\nexpected: %+v\ngot: %+v", []byte("hello"), msg.Data)
	}

	// Rotate cert and disable client verification.
	changeCurrentConfigContent(t, config, "./configs/reload/tls_multi_cert_3.conf")
	if err := server.Reload(); err != nil {
		t.Fatalf("Error reloading config: %v", err)
	}

	nc, err = nats.Connect(addr, nats.Secure(&tls.Config{
		VerifyConnection: func(s tls.ConnectionState) error {
			rawCerts <- s.PeerCertificates[0].Raw
			return nil
		},
		InsecureSkipVerify: true,
	}))
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	defer nc.Close()
	sub, err = nc.SubscribeSync("foo")
	if err != nil {
		t.Fatalf("Error subscribing: %v", err)
	}
	defer sub.Unsubscribe()

	certA := <-rawCerts
	certB := <-rawCerts
	certC := <-rawCerts
	if !bytes.Equal(certA, certB) {
		t.Error("Expected the same cert")
	}
	if bytes.Equal(certB, certC) {
		t.Error("Expected a different cert")
	}
}

// Ensure Reload supports single user authentication config changes. Test this
// by starting a server with authentication enabled, connect to it to verify,
// reload config using a different username/password, ensure reconnect fails,
// then ensure reconnect succeeds when using the correct credentials.
func TestConfigReloadRotateUserAuthentication(t *testing.T) {
	server, opts, config := runReloadServerWithConfig(t, "./configs/reload/single_user_authentication_1.conf")
	defer server.Shutdown()

	// Ensure we can connect as a sanity check.
	addr := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port)
	nc, err := nats.Connect(addr, nats.UserInfo("tyler", "T0pS3cr3t"))
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	defer nc.Close()
	disconnected := make(chan struct{}, 1)
	asyncErr := make(chan error, 1)
	nc.SetErrorHandler(func(nc *nats.Conn, sub *nats.Subscription, err error) {
		asyncErr <- err
	})
	nc.SetDisconnectHandler(func(*nats.Conn) {
		disconnected <- struct{}{}
	})

	// Change user credentials.
	changeCurrentConfigContent(t, config, "./configs/reload/single_user_authentication_2.conf")
	if err := server.Reload(); err != nil {
		t.Fatalf("Error reloading config: %v", err)
	}

	// Ensure connecting fails.
	if _, err := nats.Connect(addr, nats.UserInfo("tyler", "T0pS3cr3t")); err == nil {
		t.Fatal("Expected connect to fail")
	}

	// Ensure connecting succeeds when using new credentials.
	conn, err := nats.Connect(addr, nats.UserInfo("derek", "passw0rd"))
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	conn.Close()

	// Ensure the previous connection received an authorization error.
	// Note that it is possible that client gets EOF and not able to
	// process async error, so don't fail if we don't get it.
	select {
	case err := <-asyncErr:
		if err != nats.ErrAuthorization {
			t.Fatalf("Expected ErrAuthorization, got %v", err)
		}
	case <-time.After(time.Second):
		// Give it up to 1 sec.
	}

	// Ensure the previous connection was disconnected.
	select {
	case <-disconnected:
	case <-time.After(2 * time.Second):
		t.Fatal("Expected connection to be disconnected")
	}
}

// Ensure Reload supports enabling single user authentication. Test this by
// starting a server with authentication disabled, connect to it to verify,
// reload config with a username/password, ensure reconnect fails, then
// ensure reconnect succeeds when using the correct credentials.
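// (The single_user_authentication_*.conf files are not shown here; judging by
// the credentials exercised in these tests they presumably contain a block
// along the lines of `authorization { user: tyler, password: T0pS3cr3t }`,
// with derek/passw0rd in the second file.)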
func TestConfigReloadEnableUserAuthentication(t *testing.T) {
	server, opts, config := runReloadServerWithConfig(t, "./configs/reload/basic.conf")
	defer server.Shutdown()

	// Ensure we can connect as a sanity check.
	addr := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port)
	nc, err := nats.Connect(addr)
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	defer nc.Close()
	disconnected := make(chan struct{}, 1)
	asyncErr := make(chan error, 1)
	nc.SetErrorHandler(func(nc *nats.Conn, sub *nats.Subscription, err error) {
		asyncErr <- err
	})
	nc.SetDisconnectHandler(func(*nats.Conn) {
		disconnected <- struct{}{}
	})

	// Enable authentication.
	changeCurrentConfigContent(t, config, "./configs/reload/single_user_authentication_1.conf")
	if err := server.Reload(); err != nil {
		t.Fatalf("Error reloading config: %v", err)
	}

	// Ensure connecting fails.
	if _, err := nats.Connect(addr); err == nil {
		t.Fatal("Expected connect to fail")
	}

	// Ensure connecting succeeds when using new credentials.
	conn, err := nats.Connect(addr, nats.UserInfo("tyler", "T0pS3cr3t"))
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	conn.Close()

	// Ensure the previous connection received an authorization error.
	// Note that it is possible that client gets EOF and not able to
	// process async error, so don't fail if we don't get it.
	select {
	case err := <-asyncErr:
		if err != nats.ErrAuthorization {
			t.Fatalf("Expected ErrAuthorization, got %v", err)
		}
	case <-time.After(time.Second):
	}

	// Ensure the previous connection was disconnected.
	select {
	case <-disconnected:
	case <-time.After(2 * time.Second):
		t.Fatal("Expected connection to be disconnected")
	}
}

// Ensure Reload supports disabling single user authentication. Test this by
// starting a server with authentication enabled, connect to it to verify,
// reload config with authentication disabled, then ensure connecting
// with no credentials succeeds.
func TestConfigReloadDisableUserAuthentication(t *testing.T) {
	server, opts, config := runReloadServerWithConfig(t, "./configs/reload/single_user_authentication_1.conf")
	defer server.Shutdown()

	// Ensure we can connect as a sanity check.
	addr := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port)
	nc, err := nats.Connect(addr, nats.UserInfo("tyler", "T0pS3cr3t"))
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	defer nc.Close()
	nc.SetErrorHandler(func(nc *nats.Conn, sub *nats.Subscription, err error) {
		t.Fatalf("Client received an unexpected error: %v", err)
	})

	// Disable authentication.
	changeCurrentConfigContent(t, config, "./configs/reload/basic.conf")
	if err := server.Reload(); err != nil {
		t.Fatalf("Error reloading config: %v", err)
	}

	// Ensure connecting succeeds with no credentials.
	conn, err := nats.Connect(addr)
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	conn.Close()
}

// Ensure Reload supports token authentication config changes. Test this by
// starting a server with token authentication enabled, connect to it to
// verify, reload config using a different token, ensure reconnect fails, then
// ensure reconnect succeeds when using the correct token.
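// (The token_authentication_*.conf files are assumed to hold a single token,
// e.g. `authorization { token: T0pS3cr3t }`, rotated to "passw0rd" in the
// second file, matching the tokens used below.)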
757 func TestConfigReloadRotateTokenAuthentication(t *testing.T) { 758 server, opts, config := runReloadServerWithConfig(t, "./configs/reload/token_authentication_1.conf") 759 defer server.Shutdown() 760 761 disconnected := make(chan struct{}) 762 asyncErr := make(chan error) 763 eh := func(nc *nats.Conn, sub *nats.Subscription, err error) { asyncErr <- err } 764 dh := func(*nats.Conn) { disconnected <- struct{}{} } 765 766 // Ensure we can connect as a sanity check. 767 addr := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port) 768 nc, err := nats.Connect(addr, nats.Token("T0pS3cr3t"), nats.ErrorHandler(eh), nats.DisconnectHandler(dh)) 769 if err != nil { 770 t.Fatalf("Error creating client: %v", err) 771 } 772 defer nc.Close() 773 774 // Change authentication token. 775 changeCurrentConfigContent(t, config, "./configs/reload/token_authentication_2.conf") 776 if err := server.Reload(); err != nil { 777 t.Fatalf("Error reloading config: %v", err) 778 } 779 780 // Ensure connecting fails. 781 if _, err := nats.Connect(addr, nats.Token("T0pS3cr3t")); err == nil { 782 t.Fatal("Expected connect to fail") 783 } 784 785 // Ensure connecting succeeds when using new credentials. 786 conn, err := nats.Connect(addr, nats.Token("passw0rd")) 787 if err != nil { 788 t.Fatalf("Error creating client: %v", err) 789 } 790 conn.Close() 791 792 // Ensure the previous connection received an authorization error. 793 select { 794 case err := <-asyncErr: 795 if err != nats.ErrAuthorization { 796 t.Fatalf("Expected ErrAuthorization, got %v", err) 797 } 798 case <-time.After(2 * time.Second): 799 t.Fatal("Expected authorization error") 800 } 801 802 // Ensure the previous connection was disconnected. 803 select { 804 case <-disconnected: 805 case <-time.After(2 * time.Second): 806 t.Fatal("Expected connection to be disconnected") 807 } 808 } 809 810 // Ensure Reload supports enabling token authentication. Test this by starting 811 // a server with authentication disabled, connect to it to verify, reload 812 // config using with a token, ensure reconnect fails, then ensure reconnect 813 // succeeds when using the correct token. 814 func TestConfigReloadEnableTokenAuthentication(t *testing.T) { 815 server, opts, config := runReloadServerWithConfig(t, "./configs/reload/basic.conf") 816 defer server.Shutdown() 817 818 // Ensure we can connect as a sanity check. 819 addr := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port) 820 nc, err := nats.Connect(addr) 821 if err != nil { 822 t.Fatalf("Error creating client: %v", err) 823 } 824 defer nc.Close() 825 disconnected := make(chan struct{}, 1) 826 asyncErr := make(chan error, 1) 827 nc.SetErrorHandler(func(nc *nats.Conn, sub *nats.Subscription, err error) { 828 asyncErr <- err 829 }) 830 nc.SetDisconnectHandler(func(*nats.Conn) { 831 disconnected <- struct{}{} 832 }) 833 834 // Enable authentication. 835 changeCurrentConfigContent(t, config, "./configs/reload/token_authentication_1.conf") 836 if err := server.Reload(); err != nil { 837 t.Fatalf("Error reloading config: %v", err) 838 } 839 840 // Ensure connecting fails. 841 if _, err := nats.Connect(addr); err == nil { 842 t.Fatal("Expected connect to fail") 843 } 844 845 // Ensure connecting succeeds when using new credentials. 846 conn, err := nats.Connect(addr, nats.Token("T0pS3cr3t")) 847 if err != nil { 848 t.Fatalf("Error creating client: %v", err) 849 } 850 conn.Close() 851 852 // Ensure the previous connection received an authorization error. 
853 // Note that it is possible that client gets EOF and not able to 854 // process async error, so don't fail if we don't get it. 855 select { 856 case err := <-asyncErr: 857 if err != nats.ErrAuthorization { 858 t.Fatalf("Expected ErrAuthorization, got %v", err) 859 } 860 case <-time.After(time.Second): 861 } 862 863 // Ensure the previous connection was disconnected. 864 select { 865 case <-disconnected: 866 case <-time.After(2 * time.Second): 867 t.Fatal("Expected connection to be disconnected") 868 } 869 } 870 871 // Ensure Reload supports disabling single token authentication. Test this by 872 // starting a server with authentication enabled, connect to it to verify, 873 // reload config using with authentication disabled, then ensure connecting 874 // with no token succeeds. 875 func TestConfigReloadDisableTokenAuthentication(t *testing.T) { 876 server, opts, config := runReloadServerWithConfig(t, "./configs/reload/token_authentication_1.conf") 877 defer server.Shutdown() 878 879 // Ensure we can connect as a sanity check. 880 addr := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port) 881 nc, err := nats.Connect(addr, nats.Token("T0pS3cr3t")) 882 if err != nil { 883 t.Fatalf("Error creating client: %v", err) 884 } 885 defer nc.Close() 886 nc.SetErrorHandler(func(nc *nats.Conn, sub *nats.Subscription, err error) { 887 t.Fatalf("Client received an unexpected error: %v", err) 888 }) 889 890 // Disable authentication. 891 changeCurrentConfigContent(t, config, "./configs/reload/basic.conf") 892 if err := server.Reload(); err != nil { 893 t.Fatalf("Error reloading config: %v", err) 894 } 895 896 // Ensure connecting succeeds with no credentials. 897 conn, err := nats.Connect(addr) 898 if err != nil { 899 t.Fatalf("Error creating client: %v", err) 900 } 901 conn.Close() 902 } 903 904 // Ensure Reload supports users authentication config changes. Test this by 905 // starting a server with users authentication enabled, connect to it to 906 // verify, reload config using a different user, ensure reconnect fails, then 907 // ensure reconnect succeeds when using the correct credentials. 908 func TestConfigReloadRotateUsersAuthentication(t *testing.T) { 909 server, opts, config := runReloadServerWithConfig(t, "./configs/reload/multiple_users_1.conf") 910 defer server.Shutdown() 911 912 // Ensure we can connect as a sanity check. 913 addr := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port) 914 nc, err := nats.Connect(addr, nats.UserInfo("alice", "foo")) 915 if err != nil { 916 t.Fatalf("Error creating client: %v", err) 917 } 918 defer nc.Close() 919 disconnected := make(chan struct{}, 1) 920 asyncErr := make(chan error, 1) 921 nc.SetErrorHandler(func(nc *nats.Conn, sub *nats.Subscription, err error) { 922 asyncErr <- err 923 }) 924 nc.SetDisconnectHandler(func(*nats.Conn) { 925 disconnected <- struct{}{} 926 }) 927 928 // These credentials won't change. 929 nc2, err := nats.Connect(addr, nats.UserInfo("bob", "bar")) 930 if err != nil { 931 t.Fatalf("Error creating client: %v", err) 932 } 933 defer nc2.Close() 934 sub, err := nc2.SubscribeSync("foo") 935 if err != nil { 936 t.Fatalf("Error subscribing: %v", err) 937 } 938 defer sub.Unsubscribe() 939 940 // Change users credentials. 941 changeCurrentConfigContent(t, config, "./configs/reload/multiple_users_2.conf") 942 if err := server.Reload(); err != nil { 943 t.Fatalf("Error reloading config: %v", err) 944 } 945 946 // Ensure connecting fails. 
947 if _, err := nats.Connect(addr, nats.UserInfo("alice", "foo")); err == nil { 948 t.Fatal("Expected connect to fail") 949 } 950 951 // Ensure connecting succeeds when using new credentials. 952 conn, err := nats.Connect(addr, nats.UserInfo("alice", "baz")) 953 if err != nil { 954 t.Fatalf("Error creating client: %v", err) 955 } 956 conn.Close() 957 958 // Ensure the previous connection received an authorization error. 959 // Note that it is possible that client gets EOF and not able to 960 // process async error, so don't fail if we don't get it. 961 select { 962 case err := <-asyncErr: 963 if err != nats.ErrAuthorization { 964 t.Fatalf("Expected ErrAuthorization, got %v", err) 965 } 966 case <-time.After(time.Second): 967 } 968 969 // Ensure the previous connection was disconnected. 970 select { 971 case <-disconnected: 972 case <-time.After(2 * time.Second): 973 t.Fatal("Expected connection to be disconnected") 974 } 975 976 // Ensure the connection using unchanged credentials can still 977 // publish/receive. 978 if err := nc2.Publish("foo", []byte("hello")); err != nil { 979 t.Fatalf("Error publishing: %v", err) 980 } 981 nc2.Flush() 982 msg, err := sub.NextMsg(2 * time.Second) 983 if err != nil { 984 t.Fatalf("Error receiving msg: %v", err) 985 } 986 if string(msg.Data) != "hello" { 987 t.Fatalf("Msg is incorrect.\nexpected: %+v\ngot: %+v", []byte("hello"), msg.Data) 988 } 989 } 990 991 // Ensure Reload supports enabling users authentication. Test this by starting 992 // a server with authentication disabled, connect to it to verify, reload 993 // config using with users, ensure reconnect fails, then ensure reconnect 994 // succeeds when using the correct credentials. 995 func TestConfigReloadEnableUsersAuthentication(t *testing.T) { 996 server, opts, config := runReloadServerWithConfig(t, "./configs/reload/basic.conf") 997 defer server.Shutdown() 998 999 // Ensure we can connect as a sanity check. 1000 addr := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port) 1001 nc, err := nats.Connect(addr) 1002 if err != nil { 1003 t.Fatalf("Error creating client: %v", err) 1004 } 1005 defer nc.Close() 1006 disconnected := make(chan struct{}, 1) 1007 asyncErr := make(chan error, 1) 1008 nc.SetErrorHandler(func(nc *nats.Conn, sub *nats.Subscription, err error) { 1009 asyncErr <- err 1010 }) 1011 nc.SetDisconnectHandler(func(*nats.Conn) { 1012 disconnected <- struct{}{} 1013 }) 1014 1015 // Enable authentication. 1016 changeCurrentConfigContent(t, config, "./configs/reload/multiple_users_1.conf") 1017 if err := server.Reload(); err != nil { 1018 t.Fatalf("Error reloading config: %v", err) 1019 } 1020 1021 // Ensure connecting fails. 1022 if _, err := nats.Connect(addr); err == nil { 1023 t.Fatal("Expected connect to fail") 1024 } 1025 1026 // Ensure connecting succeeds when using new credentials. 1027 conn, err := nats.Connect(addr, nats.UserInfo("alice", "foo")) 1028 if err != nil { 1029 t.Fatalf("Error creating client: %v", err) 1030 } 1031 conn.Close() 1032 1033 // Ensure the previous connection received an authorization error. 1034 // Note that it is possible that client gets EOF and not able to 1035 // process async error, so don't fail if we don't get it. 1036 select { 1037 case err := <-asyncErr: 1038 if err != nats.ErrAuthorization { 1039 t.Fatalf("Expected ErrAuthorization, got %v", err) 1040 } 1041 case <-time.After(time.Second): 1042 } 1043 1044 // Ensure the previous connection was disconnected. 
1045 select { 1046 case <-disconnected: 1047 case <-time.After(5 * time.Second): 1048 t.Fatal("Expected connection to be disconnected") 1049 } 1050 } 1051 1052 // Ensure Reload supports disabling users authentication. Test this by starting 1053 // a server with authentication enabled, connect to it to verify, 1054 // reload config using with authentication disabled, then ensure connecting 1055 // with no credentials succeeds. 1056 func TestConfigReloadDisableUsersAuthentication(t *testing.T) { 1057 server, opts, config := runReloadServerWithConfig(t, "./configs/reload/multiple_users_1.conf") 1058 defer server.Shutdown() 1059 1060 // Ensure we can connect as a sanity check. 1061 addr := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port) 1062 nc, err := nats.Connect(addr, nats.UserInfo("alice", "foo")) 1063 if err != nil { 1064 t.Fatalf("Error creating client: %v", err) 1065 } 1066 defer nc.Close() 1067 nc.SetErrorHandler(func(nc *nats.Conn, sub *nats.Subscription, err error) { 1068 t.Fatalf("Client received an unexpected error: %v", err) 1069 }) 1070 1071 // Disable authentication. 1072 changeCurrentConfigContent(t, config, "./configs/reload/basic.conf") 1073 if err := server.Reload(); err != nil { 1074 t.Fatalf("Error reloading config: %v", err) 1075 } 1076 1077 // Ensure connecting succeeds with no credentials. 1078 conn, err := nats.Connect(addr) 1079 if err != nil { 1080 t.Fatalf("Error creating client: %v", err) 1081 } 1082 conn.Close() 1083 } 1084 1085 // Ensure Reload supports changing permissions. Test this by starting a server 1086 // with a user configured with certain permissions, test publish and subscribe, 1087 // reload config with new permissions, ensure the previous subscription was 1088 // closed and publishes fail, then ensure the new permissions succeed. 1089 func TestConfigReloadChangePermissions(t *testing.T) { 1090 server, opts, config := runReloadServerWithConfig(t, "./configs/reload/authorization_1.conf") 1091 defer server.Shutdown() 1092 1093 addr := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port) 1094 nc, err := nats.Connect(addr, nats.UserInfo("bob", "bar")) 1095 if err != nil { 1096 t.Fatalf("Error creating client: %v", err) 1097 } 1098 defer nc.Close() 1099 asyncErr := make(chan error, 1) 1100 nc.SetErrorHandler(func(nc *nats.Conn, sub *nats.Subscription, err error) { 1101 asyncErr <- err 1102 }) 1103 // Ensure we can publish and receive messages as a sanity check. 
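	// Three users are exercised below: bob (nc), alice (conn) and susan
	// (sconn). Under authorization_1.conf, bob subscribes to _INBOX.> and
	// publishes to req.foo, alice subscribes to req.foo and publishes to
	// _INBOX.foo and PUBLIC.foo, and susan receives on PUBLIC.* but not on
	// foo.*; the reload to authorization_2.conf further down flips several
	// of these expectations.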
1104 sub, err := nc.SubscribeSync("_INBOX.>") 1105 if err != nil { 1106 t.Fatalf("Error subscribing: %v", err) 1107 } 1108 nc.Flush() 1109 1110 conn, err := nats.Connect(addr, nats.UserInfo("alice", "foo")) 1111 if err != nil { 1112 t.Fatalf("Error creating client: %v", err) 1113 } 1114 defer conn.Close() 1115 1116 sub2, err := conn.SubscribeSync("req.foo") 1117 if err != nil { 1118 t.Fatalf("Error subscribing: %v", err) 1119 } 1120 if err := conn.Publish("_INBOX.foo", []byte("hello")); err != nil { 1121 t.Fatalf("Error publishing message: %v", err) 1122 } 1123 conn.Flush() 1124 1125 msg, err := sub.NextMsg(2 * time.Second) 1126 if err != nil { 1127 t.Fatalf("Error receiving msg: %v", err) 1128 } 1129 if string(msg.Data) != "hello" { 1130 t.Fatalf("Msg is incorrect.\nexpected: %+v\ngot: %+v", []byte("hello"), msg.Data) 1131 } 1132 1133 if err := nc.Publish("req.foo", []byte("world")); err != nil { 1134 t.Fatalf("Error publishing message: %v", err) 1135 } 1136 nc.Flush() 1137 1138 msg, err = sub2.NextMsg(2 * time.Second) 1139 if err != nil { 1140 t.Fatalf("Error receiving msg: %v", err) 1141 } 1142 if string(msg.Data) != "world" { 1143 t.Fatalf("Msg is incorrect.\nexpected: %+v\ngot: %+v", []byte("world"), msg.Data) 1144 } 1145 1146 // Susan will subscribe to two subjects, both will succeed but a send to foo.bar should not succeed 1147 // however PUBLIC.foo should. 1148 sconn, err := nats.Connect(addr, nats.UserInfo("susan", "baz")) 1149 if err != nil { 1150 t.Fatalf("Error creating client: %v", err) 1151 } 1152 defer sconn.Close() 1153 1154 asyncErr2 := make(chan error, 1) 1155 sconn.SetErrorHandler(func(nc *nats.Conn, sub *nats.Subscription, err error) { 1156 asyncErr2 <- err 1157 }) 1158 1159 fooSub, err := sconn.SubscribeSync("foo.*") 1160 if err != nil { 1161 t.Fatalf("Error subscribing: %v", err) 1162 } 1163 sconn.Flush() 1164 1165 // Publishing from bob on foo.bar should not come through. 1166 if err := conn.Publish("foo.bar", []byte("hello")); err != nil { 1167 t.Fatalf("Error publishing message: %v", err) 1168 } 1169 conn.Flush() 1170 1171 _, err = fooSub.NextMsg(100 * time.Millisecond) 1172 if err != nats.ErrTimeout { 1173 t.Fatalf("Received a message we shouldn't have") 1174 } 1175 1176 pubSub, err := sconn.SubscribeSync("PUBLIC.*") 1177 if err != nil { 1178 t.Fatalf("Error subscribing: %v", err) 1179 } 1180 sconn.Flush() 1181 1182 select { 1183 case err := <-asyncErr2: 1184 t.Fatalf("Received unexpected error for susan: %v", err) 1185 default: 1186 } 1187 1188 // This should work ok with original config. 1189 if err := conn.Publish("PUBLIC.foo", []byte("hello monkey")); err != nil { 1190 t.Fatalf("Error publishing message: %v", err) 1191 } 1192 conn.Flush() 1193 1194 msg, err = pubSub.NextMsg(2 * time.Second) 1195 if err != nil { 1196 t.Fatalf("Error receiving msg: %v", err) 1197 } 1198 if string(msg.Data) != "hello monkey" { 1199 t.Fatalf("Msg is incorrect.\nexpected: %q\ngot: %q", "hello monkey", msg.Data) 1200 } 1201 1202 /////////////////////////////////////////// 1203 // Change permissions. 1204 /////////////////////////////////////////// 1205 1206 changeCurrentConfigContent(t, config, "./configs/reload/authorization_2.conf") 1207 if err := server.Reload(); err != nil { 1208 t.Fatalf("Error reloading config: %v", err) 1209 } 1210 1211 // Ensure we receive an error for the subscription that is no longer authorized. 
1212 // In this test, since connection is not closed by the server, 1213 // the client must receive an -ERR 1214 select { 1215 case err := <-asyncErr: 1216 if !strings.Contains(strings.ToLower(err.Error()), "permissions violation for subscription to \"_inbox.>\"") { 1217 t.Fatalf("Expected permissions violation error, got %v", err) 1218 } 1219 case <-time.After(5 * time.Second): 1220 t.Fatal("Expected permissions violation error") 1221 } 1222 1223 // Ensure we receive an error when publishing to req.foo and we no longer 1224 // receive messages on _INBOX.>. 1225 if err := nc.Publish("req.foo", []byte("hola")); err != nil { 1226 t.Fatalf("Error publishing message: %v", err) 1227 } 1228 nc.Flush() 1229 if err := conn.Publish("_INBOX.foo", []byte("mundo")); err != nil { 1230 t.Fatalf("Error publishing message: %v", err) 1231 } 1232 conn.Flush() 1233 1234 select { 1235 case err := <-asyncErr: 1236 if !strings.Contains(strings.ToLower(err.Error()), "permissions violation for publish to \"req.foo\"") { 1237 t.Fatalf("Expected permissions violation error, got %v", err) 1238 } 1239 case <-time.After(5 * time.Second): 1240 t.Fatal("Expected permissions violation error") 1241 } 1242 1243 queued, _, err := sub2.Pending() 1244 if err != nil { 1245 t.Fatalf("Failed to get pending messaged: %v", err) 1246 } 1247 if queued != 0 { 1248 t.Fatalf("Pending is incorrect.\nexpected: 0\ngot: %d", queued) 1249 } 1250 1251 queued, _, err = sub.Pending() 1252 if err != nil { 1253 t.Fatalf("Failed to get pending messaged: %v", err) 1254 } 1255 if queued != 0 { 1256 t.Fatalf("Pending is incorrect.\nexpected: 0\ngot: %d", queued) 1257 } 1258 1259 // Ensure we can publish to _INBOX.foo.bar and subscribe to _INBOX.foo.>. 1260 sub, err = nc.SubscribeSync("_INBOX.foo.>") 1261 if err != nil { 1262 t.Fatalf("Error subscribing: %v", err) 1263 } 1264 nc.Flush() 1265 if err := nc.Publish("_INBOX.foo.bar", []byte("testing")); err != nil { 1266 t.Fatalf("Error publishing message: %v", err) 1267 } 1268 nc.Flush() 1269 msg, err = sub.NextMsg(2 * time.Second) 1270 if err != nil { 1271 t.Fatalf("Error receiving msg: %v", err) 1272 } 1273 if string(msg.Data) != "testing" { 1274 t.Fatalf("Msg is incorrect.\nexpected: %+v\ngot: %+v", []byte("testing"), msg.Data) 1275 } 1276 1277 select { 1278 case err := <-asyncErr: 1279 t.Fatalf("Received unexpected error: %v", err) 1280 default: 1281 } 1282 1283 // Now check susan again. 1284 // 1285 // This worked ok with original config but should not deliver a message now. 1286 if err := conn.Publish("PUBLIC.foo", []byte("hello monkey")); err != nil { 1287 t.Fatalf("Error publishing message: %v", err) 1288 } 1289 conn.Flush() 1290 1291 _, err = pubSub.NextMsg(100 * time.Millisecond) 1292 if err != nats.ErrTimeout { 1293 t.Fatalf("Received a message we shouldn't have") 1294 } 1295 1296 // Now check foo.bar, which did not work before but should work now.. 1297 if err := conn.Publish("foo.bar", []byte("hello?")); err != nil { 1298 t.Fatalf("Error publishing message: %v", err) 1299 } 1300 conn.Flush() 1301 1302 msg, err = fooSub.NextMsg(2 * time.Second) 1303 if err != nil { 1304 t.Fatalf("Error receiving msg: %v", err) 1305 } 1306 if string(msg.Data) != "hello?" { 1307 t.Fatalf("Msg is incorrect.\nexpected: %q\ngot: %q", "hello?", msg.Data) 1308 } 1309 1310 // Once last check for no errors. 
1311 sconn.Flush() 1312 1313 select { 1314 case err := <-asyncErr2: 1315 t.Fatalf("Received unexpected error for susan: %v", err) 1316 default: 1317 } 1318 } 1319 1320 // Ensure Reload returns an error when attempting to change cluster address 1321 // host. 1322 func TestConfigReloadClusterHostUnsupported(t *testing.T) { 1323 server, _, config := runReloadServerWithConfig(t, "./configs/reload/srv_a_1.conf") 1324 defer server.Shutdown() 1325 1326 // Attempt to change cluster listen host. 1327 changeCurrentConfigContent(t, config, "./configs/reload/srv_c_1.conf") 1328 1329 // This should fail because cluster address cannot be changed. 1330 if err := server.Reload(); err == nil { 1331 t.Fatal("Expected Reload to return an error") 1332 } 1333 } 1334 1335 // Ensure Reload returns an error when attempting to change cluster address 1336 // port. 1337 func TestConfigReloadClusterPortUnsupported(t *testing.T) { 1338 server, _, config := runReloadServerWithConfig(t, "./configs/reload/srv_a_1.conf") 1339 defer server.Shutdown() 1340 1341 // Attempt to change cluster listen port. 1342 changeCurrentConfigContent(t, config, "./configs/reload/srv_b_1.conf") 1343 1344 // This should fail because cluster address cannot be changed. 1345 if err := server.Reload(); err == nil { 1346 t.Fatal("Expected Reload to return an error") 1347 } 1348 } 1349 1350 // Ensure Reload supports enabling route authorization. Test this by starting 1351 // two servers in a cluster without authorization, ensuring messages flow 1352 // between them, then reloading with authorization and ensuring messages no 1353 // longer flow until reloading with the correct credentials. 1354 func TestConfigReloadEnableClusterAuthorization(t *testing.T) { 1355 srvb, srvbOpts, srvbConfig := runReloadServerWithConfig(t, "./configs/reload/srv_b_1.conf") 1356 defer srvb.Shutdown() 1357 1358 srva, srvaOpts, srvaConfig := runReloadServerWithConfig(t, "./configs/reload/srv_a_1.conf") 1359 defer srva.Shutdown() 1360 1361 checkClusterFormed(t, srva, srvb) 1362 1363 srvaAddr := fmt.Sprintf("nats://%s:%d", srvaOpts.Host, srvaOpts.Port) 1364 srvaConn, err := nats.Connect(srvaAddr) 1365 if err != nil { 1366 t.Fatalf("Error creating client: %v", err) 1367 } 1368 defer srvaConn.Close() 1369 sub, err := srvaConn.SubscribeSync("foo") 1370 if err != nil { 1371 t.Fatalf("Error subscribing: %v", err) 1372 } 1373 defer sub.Unsubscribe() 1374 if err := srvaConn.Flush(); err != nil { 1375 t.Fatalf("Error flushing: %v", err) 1376 } 1377 1378 srvbAddr := fmt.Sprintf("nats://%s:%d", srvbOpts.Host, srvbOpts.Port) 1379 srvbConn, err := nats.Connect(srvbAddr) 1380 if err != nil { 1381 t.Fatalf("Error creating client: %v", err) 1382 } 1383 defer srvbConn.Close() 1384 1385 if numRoutes := srvb.NumRoutes(); numRoutes != DEFAULT_ROUTE_POOL_SIZE { 1386 t.Fatalf("Expected %d route, got %d", DEFAULT_ROUTE_POOL_SIZE, numRoutes) 1387 } 1388 1389 // Ensure messages flow through the cluster as a sanity check. 1390 if err := srvbConn.Publish("foo", []byte("hello")); err != nil { 1391 t.Fatalf("Error publishing: %v", err) 1392 } 1393 srvbConn.Flush() 1394 msg, err := sub.NextMsg(2 * time.Second) 1395 if err != nil { 1396 t.Fatalf("Error receiving message: %v", err) 1397 } 1398 if string(msg.Data) != "hello" { 1399 t.Fatalf("Msg is incorrect.\nexpected: %+v\ngot: %+v", []byte("hello"), msg.Data) 1400 } 1401 1402 // Enable route authorization. 
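	// (srv_b_2.conf is not shown here; enabling route authorization in a
	// cluster block typically looks like
	// `cluster { authorization { user: <user>, password: <pass> } }`, with
	// srv_a_2.conf supplying the matching credentials so the route can be
	// re-established below. The <user>/<pass> values are placeholders.)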
1403 changeCurrentConfigContent(t, srvbConfig, "./configs/reload/srv_b_2.conf") 1404 if err := srvb.Reload(); err != nil { 1405 t.Fatalf("Error reloading config: %v", err) 1406 } 1407 1408 checkNumRoutes(t, srvb, 0) 1409 1410 // Ensure messages no longer flow through the cluster. 1411 for i := 0; i < 5; i++ { 1412 if err := srvbConn.Publish("foo", []byte("world")); err != nil { 1413 t.Fatalf("Error publishing: %v", err) 1414 } 1415 srvbConn.Flush() 1416 } 1417 if _, err := sub.NextMsg(50 * time.Millisecond); err != nats.ErrTimeout { 1418 t.Fatalf("Expected ErrTimeout, got %v", err) 1419 } 1420 1421 // Reload Server A with correct route credentials. 1422 changeCurrentConfigContent(t, srvaConfig, "./configs/reload/srv_a_2.conf") 1423 if err := srva.Reload(); err != nil { 1424 t.Fatalf("Error reloading config: %v", err) 1425 } 1426 checkClusterFormed(t, srva, srvb) 1427 1428 if numRoutes := srvb.NumRoutes(); numRoutes != DEFAULT_ROUTE_POOL_SIZE { 1429 t.Fatalf("Expected %d route, got %d", DEFAULT_ROUTE_POOL_SIZE, numRoutes) 1430 } 1431 1432 // Ensure messages flow through the cluster now. 1433 if err := srvbConn.Publish("foo", []byte("hola")); err != nil { 1434 t.Fatalf("Error publishing: %v", err) 1435 } 1436 srvbConn.Flush() 1437 msg, err = sub.NextMsg(2 * time.Second) 1438 if err != nil { 1439 t.Fatalf("Error receiving message: %v", err) 1440 } 1441 if string(msg.Data) != "hola" { 1442 t.Fatalf("Msg is incorrect.\nexpected: %+v\ngot: %+v", []byte("hola"), msg.Data) 1443 } 1444 } 1445 1446 // Ensure Reload supports disabling route authorization. Test this by starting 1447 // two servers in a cluster with authorization, ensuring messages flow 1448 // between them, then reloading without authorization and ensuring messages 1449 // still flow. 1450 func TestConfigReloadDisableClusterAuthorization(t *testing.T) { 1451 srvb, srvbOpts, srvbConfig := runReloadServerWithConfig(t, "./configs/reload/srv_b_2.conf") 1452 defer srvb.Shutdown() 1453 1454 srva, srvaOpts, _ := runReloadServerWithConfig(t, "./configs/reload/srv_a_2.conf") 1455 defer srva.Shutdown() 1456 1457 checkClusterFormed(t, srva, srvb) 1458 1459 srvaAddr := fmt.Sprintf("nats://%s:%d", srvaOpts.Host, srvaOpts.Port) 1460 srvaConn, err := nats.Connect(srvaAddr) 1461 if err != nil { 1462 t.Fatalf("Error creating client: %v", err) 1463 } 1464 defer srvaConn.Close() 1465 1466 sub, err := srvaConn.SubscribeSync("foo") 1467 if err != nil { 1468 t.Fatalf("Error subscribing: %v", err) 1469 } 1470 defer sub.Unsubscribe() 1471 if err := srvaConn.Flush(); err != nil { 1472 t.Fatalf("Error flushing: %v", err) 1473 } 1474 1475 srvbAddr := fmt.Sprintf("nats://%s:%d", srvbOpts.Host, srvbOpts.Port) 1476 srvbConn, err := nats.Connect(srvbAddr) 1477 if err != nil { 1478 t.Fatalf("Error creating client: %v", err) 1479 } 1480 defer srvbConn.Close() 1481 1482 if numRoutes := srvb.NumRoutes(); numRoutes != DEFAULT_ROUTE_POOL_SIZE { 1483 t.Fatalf("Expected %d route, got %d", DEFAULT_ROUTE_POOL_SIZE, numRoutes) 1484 } 1485 1486 // Ensure messages flow through the cluster as a sanity check. 1487 if err := srvbConn.Publish("foo", []byte("hello")); err != nil { 1488 t.Fatalf("Error publishing: %v", err) 1489 } 1490 srvbConn.Flush() 1491 msg, err := sub.NextMsg(2 * time.Second) 1492 if err != nil { 1493 t.Fatalf("Error receiving message: %v", err) 1494 } 1495 if string(msg.Data) != "hello" { 1496 t.Fatalf("Msg is incorrect.\nexpected: %+v\ngot: %+v", []byte("hello"), msg.Data) 1497 } 1498 1499 // Disable route authorization. 
1500 changeCurrentConfigContent(t, srvbConfig, "./configs/reload/srv_b_1.conf") 1501 if err := srvb.Reload(); err != nil { 1502 t.Fatalf("Error reloading config: %v", err) 1503 } 1504 1505 checkClusterFormed(t, srva, srvb) 1506 1507 if numRoutes := srvb.NumRoutes(); numRoutes != DEFAULT_ROUTE_POOL_SIZE { 1508 t.Fatalf("Expected %d route, got %d", DEFAULT_ROUTE_POOL_SIZE, numRoutes) 1509 } 1510 1511 // Ensure messages still flow through the cluster. 1512 if err := srvbConn.Publish("foo", []byte("hola")); err != nil { 1513 t.Fatalf("Error publishing: %v", err) 1514 } 1515 srvbConn.Flush() 1516 msg, err = sub.NextMsg(2 * time.Second) 1517 if err != nil { 1518 t.Fatalf("Error receiving message: %v", err) 1519 } 1520 if string(msg.Data) != "hola" { 1521 t.Fatalf("Msg is incorrect.\nexpected: %+v\ngot: %+v", []byte("hola"), msg.Data) 1522 } 1523 } 1524 1525 // Ensure Reload supports changing cluster routes. Test this by starting 1526 // two servers in a cluster, ensuring messages flow between them, then 1527 // reloading with a different route and ensuring messages flow through the new 1528 // cluster. 1529 func TestConfigReloadClusterRoutes(t *testing.T) { 1530 srvb, srvbOpts, _ := runReloadServerWithConfig(t, "./configs/reload/srv_b_1.conf") 1531 defer srvb.Shutdown() 1532 1533 srva, srvaOpts, srvaConfig := runReloadServerWithConfig(t, "./configs/reload/srv_a_1.conf") 1534 defer srva.Shutdown() 1535 1536 checkClusterFormed(t, srva, srvb) 1537 1538 srvcOpts, err := ProcessConfigFile("./configs/reload/srv_c_1.conf") 1539 if err != nil { 1540 t.Fatalf("Error processing config file: %v", err) 1541 } 1542 srvcOpts.NoLog = true 1543 srvcOpts.NoSigs = true 1544 1545 srvc := RunServer(srvcOpts) 1546 defer srvc.Shutdown() 1547 1548 srvaAddr := fmt.Sprintf("nats://%s:%d", srvaOpts.Host, srvaOpts.Port) 1549 srvaConn, err := nats.Connect(srvaAddr) 1550 if err != nil { 1551 t.Fatalf("Error creating client: %v", err) 1552 } 1553 defer srvaConn.Close() 1554 1555 sub, err := srvaConn.SubscribeSync("foo") 1556 if err != nil { 1557 t.Fatalf("Error subscribing: %v", err) 1558 } 1559 defer sub.Unsubscribe() 1560 if err := srvaConn.Flush(); err != nil { 1561 t.Fatalf("Error flushing: %v", err) 1562 } 1563 1564 srvbAddr := fmt.Sprintf("nats://%s:%d", srvbOpts.Host, srvbOpts.Port) 1565 srvbConn, err := nats.Connect(srvbAddr) 1566 if err != nil { 1567 t.Fatalf("Error creating client: %v", err) 1568 } 1569 defer srvbConn.Close() 1570 1571 if numRoutes := srvb.NumRoutes(); numRoutes != DEFAULT_ROUTE_POOL_SIZE { 1572 t.Fatalf("Expected %d route, got %d", DEFAULT_ROUTE_POOL_SIZE, numRoutes) 1573 } 1574 1575 // Ensure consumer on srvA is propagated to srvB 1576 checkExpectedSubs(t, 1, srvb) 1577 1578 // Ensure messages flow through the cluster as a sanity check. 1579 if err := srvbConn.Publish("foo", []byte("hello")); err != nil { 1580 t.Fatalf("Error publishing: %v", err) 1581 } 1582 srvbConn.Flush() 1583 msg, err := sub.NextMsg(2 * time.Second) 1584 if err != nil { 1585 t.Fatalf("Error receiving message: %v", err) 1586 } 1587 if string(msg.Data) != "hello" { 1588 t.Fatalf("Msg is incorrect.\nexpected: %+v\ngot: %+v", []byte("hello"), msg.Data) 1589 } 1590 1591 // Reload cluster routes. 1592 changeCurrentConfigContent(t, srvaConfig, "./configs/reload/srv_a_3.conf") 1593 if err := srva.Reload(); err != nil { 1594 t.Fatalf("Error reloading config: %v", err) 1595 } 1596 1597 // Kill old route server. 
1598 srvbConn.Close() 1599 srvb.Shutdown() 1600 1601 checkClusterFormed(t, srva, srvc) 1602 1603 srvcAddr := fmt.Sprintf("nats://%s:%d", srvcOpts.Host, srvcOpts.Port) 1604 srvcConn, err := nats.Connect(srvcAddr) 1605 if err != nil { 1606 t.Fatalf("Error creating client: %v", err) 1607 } 1608 defer srvcConn.Close() 1609 1610 // Ensure messages flow through the new cluster. 1611 for i := 0; i < 5; i++ { 1612 if err := srvcConn.Publish("foo", []byte("hola")); err != nil { 1613 t.Fatalf("Error publishing: %v", err) 1614 } 1615 srvcConn.Flush() 1616 } 1617 msg, err = sub.NextMsg(2 * time.Second) 1618 if err != nil { 1619 t.Fatalf("Error receiving message: %v", err) 1620 } 1621 if string(msg.Data) != "hola" { 1622 t.Fatalf("Msg is incorrect.\nexpected: %+v\ngot: %+v", []byte("hola"), msg.Data) 1623 } 1624 } 1625 1626 // Ensure Reload supports removing a solicited route. In this case from A->B 1627 // Test this by starting two servers in a cluster, ensuring messages flow between them. 1628 // Then stop server B, and have server A continue to try to connect. Reload A with a config 1629 // that removes the route and make sure it does not connect to server B when its restarted. 1630 func TestConfigReloadClusterRemoveSolicitedRoutes(t *testing.T) { 1631 srvb, srvbOpts := RunServerWithConfig("./configs/reload/srv_b_1.conf") 1632 defer srvb.Shutdown() 1633 1634 srva, srvaOpts, srvaConfig := runReloadServerWithConfig(t, "./configs/reload/srv_a_1.conf") 1635 defer srva.Shutdown() 1636 1637 checkClusterFormed(t, srva, srvb) 1638 1639 srvaAddr := fmt.Sprintf("nats://%s:%d", srvaOpts.Host, srvaOpts.Port) 1640 srvaConn, err := nats.Connect(srvaAddr) 1641 if err != nil { 1642 t.Fatalf("Error creating client: %v", err) 1643 } 1644 defer srvaConn.Close() 1645 sub, err := srvaConn.SubscribeSync("foo") 1646 if err != nil { 1647 t.Fatalf("Error subscribing: %v", err) 1648 } 1649 defer sub.Unsubscribe() 1650 if err := srvaConn.Flush(); err != nil { 1651 t.Fatalf("Error flushing: %v", err) 1652 } 1653 checkExpectedSubs(t, 1, srvb) 1654 1655 srvbAddr := fmt.Sprintf("nats://%s:%d", srvbOpts.Host, srvbOpts.Port) 1656 srvbConn, err := nats.Connect(srvbAddr) 1657 if err != nil { 1658 t.Fatalf("Error creating client: %v", err) 1659 } 1660 defer srvbConn.Close() 1661 1662 if err := srvbConn.Publish("foo", []byte("hello")); err != nil { 1663 t.Fatalf("Error publishing: %v", err) 1664 } 1665 srvbConn.Flush() 1666 msg, err := sub.NextMsg(5 * time.Second) 1667 if err != nil { 1668 t.Fatalf("Error receiving message: %v", err) 1669 } 1670 if string(msg.Data) != "hello" { 1671 t.Fatalf("Msg is incorrect.\nexpected: %+v\ngot: %+v", []byte("hello"), msg.Data) 1672 } 1673 1674 // Now stop server B. 1675 srvb.Shutdown() 1676 1677 // Wait til route is dropped. 1678 checkNumRoutes(t, srva, 0) 1679 1680 // Now change config for server A to not solicit a route to server B. 1681 changeCurrentConfigContent(t, srvaConfig, "./configs/reload/srv_a_4.conf") 1682 if err := srva.Reload(); err != nil { 1683 t.Fatalf("Error reloading config: %v", err) 1684 } 1685 1686 // Restart server B. 1687 srvb, _ = RunServerWithConfig("./configs/reload/srv_b_1.conf") 1688 defer srvb.Shutdown() 1689 1690 // We should not have a cluster formed here. 
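	// Poll for up to two route reconnect intervals; if any route shows up,
	// server A is still soliciting the route to B that the reload was meant
	// to remove.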
1691 numRoutes := 0 1692 deadline := time.Now().Add(2 * DEFAULT_ROUTE_RECONNECT) 1693 for time.Now().Before(deadline) { 1694 if numRoutes = srva.NumRoutes(); numRoutes != 0 { 1695 break 1696 } else { 1697 time.Sleep(100 * time.Millisecond) 1698 } 1699 } 1700 if numRoutes != 0 { 1701 t.Fatalf("Expected 0 routes for server A, got %d", numRoutes) 1702 } 1703 } 1704 1705 func reloadUpdateConfig(t *testing.T, s *Server, conf, content string) { 1706 t.Helper() 1707 if err := os.WriteFile(conf, []byte(content), 0666); err != nil { 1708 t.Fatalf("Error creating config file: %v", err) 1709 } 1710 if err := s.Reload(); err != nil { 1711 t.Fatalf("Error on reload: %v", err) 1712 } 1713 } 1714 1715 func TestConfigReloadClusterAdvertise(t *testing.T) { 1716 s, _, conf := runReloadServerWithContent(t, []byte(` 1717 listen: "0.0.0.0:-1" 1718 cluster: { 1719 listen: "0.0.0.0:-1" 1720 } 1721 `)) 1722 defer s.Shutdown() 1723 1724 orgClusterPort := s.ClusterAddr().Port 1725 1726 verify := func(expectedHost string, expectedPort int, expectedIP string) { 1727 s.mu.Lock() 1728 routeInfo := s.routeInfo 1729 rij := generateInfoJSON(&routeInfo) 1730 s.mu.Unlock() 1731 if routeInfo.Host != expectedHost || routeInfo.Port != expectedPort || routeInfo.IP != expectedIP { 1732 t.Fatalf("Expected host/port/IP to be %s:%v, %q, got %s:%d, %q", 1733 expectedHost, expectedPort, expectedIP, routeInfo.Host, routeInfo.Port, routeInfo.IP) 1734 } 1735 routeInfoJSON := Info{} 1736 err := json.Unmarshal(rij[5:], &routeInfoJSON) // Skip "INFO " 1737 if err != nil { 1738 t.Fatalf("Error on Unmarshal: %v", err) 1739 } 1740 // Check that server routeInfoJSON was updated too 1741 if !reflect.DeepEqual(routeInfo, routeInfoJSON) { 1742 t.Fatalf("Expected routeInfoJSON to be %+v, got %+v", routeInfo, routeInfoJSON) 1743 } 1744 } 1745 1746 // Update config with cluster_advertise 1747 reloadUpdateConfig(t, s, conf, ` 1748 listen: "0.0.0.0:-1" 1749 cluster: { 1750 listen: "0.0.0.0:-1" 1751 cluster_advertise: "me:1" 1752 } 1753 `) 1754 verify("me", 1, "nats-route://me:1/") 1755 1756 // Update config with cluster_advertise (no port specified) 1757 reloadUpdateConfig(t, s, conf, ` 1758 listen: "0.0.0.0:-1" 1759 cluster: { 1760 listen: "0.0.0.0:-1" 1761 cluster_advertise: "me" 1762 } 1763 `) 1764 verify("me", orgClusterPort, fmt.Sprintf("nats-route://me:%d/", orgClusterPort)) 1765 1766 // Update config with cluster_advertise (-1 port specified) 1767 reloadUpdateConfig(t, s, conf, ` 1768 listen: "0.0.0.0:-1" 1769 cluster: { 1770 listen: "0.0.0.0:-1" 1771 cluster_advertise: "me:-1" 1772 } 1773 `) 1774 verify("me", orgClusterPort, fmt.Sprintf("nats-route://me:%d/", orgClusterPort)) 1775 1776 // Update to remove cluster_advertise 1777 reloadUpdateConfig(t, s, conf, ` 1778 listen: "0.0.0.0:-1" 1779 cluster: { 1780 listen: "0.0.0.0:-1" 1781 } 1782 `) 1783 verify("0.0.0.0", orgClusterPort, "") 1784 } 1785 1786 func TestConfigReloadClusterNoAdvertise(t *testing.T) { 1787 s, _, conf := runReloadServerWithContent(t, []byte(` 1788 listen: "0.0.0.0:-1" 1789 client_advertise: "me:1" 1790 cluster: { 1791 listen: "0.0.0.0:-1" 1792 } 1793 `)) 1794 defer s.Shutdown() 1795 1796 s.mu.Lock() 1797 ccurls := s.routeInfo.ClientConnectURLs 1798 s.mu.Unlock() 1799 if len(ccurls) != 1 && ccurls[0] != "me:1" { 1800 t.Fatalf("Unexpected routeInfo.ClientConnectURLS: %v", ccurls) 1801 } 1802 1803 // Update config with no_advertise 1804 reloadUpdateConfig(t, s, conf, ` 1805 listen: "0.0.0.0:-1" 1806 client_advertise: "me:1" 1807 cluster: { 1808 listen: "0.0.0.0:-1" 1809 
no_advertise: true
1810 	}
1811 	`)
1812 
1813 	s.mu.Lock()
1814 	ccurls = s.routeInfo.ClientConnectURLs
1815 	s.mu.Unlock()
1816 	if len(ccurls) != 0 {
1817 		t.Fatalf("Unexpected routeInfo.ClientConnectURLS: %v", ccurls)
1818 	}
1819 
1820 	// Update config to remove no_advertise.
1821 	reloadUpdateConfig(t, s, conf, `
1822 	listen: "0.0.0.0:-1"
1823 	client_advertise: "me:1"
1824 	cluster: {
1825 		listen: "0.0.0.0:-1"
1826 	}
1827 	`)
1828 	s.mu.Lock()
1829 	ccurls = s.routeInfo.ClientConnectURLs
1830 	s.mu.Unlock()
1831 	if len(ccurls) != 1 || ccurls[0] != "me:1" {
1832 		t.Fatalf("Unexpected routeInfo.ClientConnectURLS: %v", ccurls)
1833 	}
1834 }
1835 
1836 func TestConfigReloadClusterName(t *testing.T) {
1837 	s, _, conf := runReloadServerWithContent(t, []byte(`
1838 	listen: "0.0.0.0:-1"
1839 	cluster: {
1840 		name: "abc"
1841 		listen: "0.0.0.0:-1"
1842 	}
1843 	`))
1844 	defer s.Shutdown()
1845 
1846 	// Update config with a new cluster name.
1847 	reloadUpdateConfig(t, s, conf, `
1848 	listen: "0.0.0.0:-1"
1849 	cluster: {
1850 		name: "xyz"
1851 		listen: "0.0.0.0:-1"
1852 	}
1853 	`)
1854 
1855 	if s.ClusterName() != "xyz" {
1856 		t.Fatalf("Expected updated cluster name of \"xyz\", got %q", s.ClusterName())
1857 	}
1858 }
1859 
1860 func TestConfigReloadMaxSubsUnsupported(t *testing.T) {
1861 	s, _, conf := runReloadServerWithContent(t, []byte(`
1862 	port: -1
1863 	max_subs: 1
1864 	`))
1865 	defer s.Shutdown()
1866 
1867 	if err := os.WriteFile(conf, []byte(`max_subs: 10`), 0666); err != nil {
1868 		t.Fatalf("Error writing config file: %v", err)
1869 	}
1870 	if err := s.Reload(); err == nil {
1871 		t.Fatal("Expected Reload to return an error")
1872 	}
1873 }
1874 
1875 func TestConfigReloadClientAdvertise(t *testing.T) {
1876 	s, _, conf := runReloadServerWithContent(t, []byte(`listen: "0.0.0.0:-1"`))
1877 	defer s.Shutdown()
1878 
1879 	orgPort := s.Addr().(*net.TCPAddr).Port
1880 
1881 	verify := func(expectedHost string, expectedPort int) {
1882 		s.mu.Lock()
1883 		info := s.info
1884 		s.mu.Unlock()
1885 		if info.Host != expectedHost || info.Port != expectedPort {
1886 			stackFatalf(t, "Expected host/port to be %s:%d, got %s:%d",
1887 				expectedHost, expectedPort, info.Host, info.Port)
1888 		}
1889 	}
1890 
1891 	// Update config with ClientAdvertise (port specified)
1892 	reloadUpdateConfig(t, s, conf, `
1893 	listen: "0.0.0.0:-1"
1894 	client_advertise: "me:1"
1895 	`)
1896 	verify("me", 1)
1897 
1898 	// Update config with ClientAdvertise (no port specified)
1899 	reloadUpdateConfig(t, s, conf, `
1900 	listen: "0.0.0.0:-1"
1901 	client_advertise: "me"
1902 	`)
1903 	verify("me", orgPort)
1904 
1905 	// Update config with ClientAdvertise (-1 port specified)
1906 	reloadUpdateConfig(t, s, conf, `
1907 	listen: "0.0.0.0:-1"
1908 	client_advertise: "me:-1"
1909 	`)
1910 	verify("me", orgPort)
1911 
1912 	// Now remove ClientAdvertise to check that original values
1913 	// are restored.
1914 	reloadUpdateConfig(t, s, conf, `listen: "0.0.0.0:-1"`)
1915 	verify("0.0.0.0", orgPort)
1916 }
1917 
1918 // Ensure Reload supports changing the max connections. Test this by starting a
1919 // server with no max connections, connecting two clients, reloading with a
1920 // max connections of one, and ensuring one client is disconnected.
1921 func TestConfigReloadMaxConnections(t *testing.T) {
1922 	server, opts, config := runReloadServerWithConfig(t, "./configs/reload/basic.conf")
1923 	defer server.Shutdown()
1924 
1925 	// Make two connections.
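	// max_connections.conf is not reproduced in this listing; per the comment above,
	// the relevant line is presumably just something like:
	//
	//     max_connections: 1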
1926 addr := fmt.Sprintf("nats://%s:%d", opts.Host, server.Addr().(*net.TCPAddr).Port) 1927 nc1, err := nats.Connect(addr) 1928 if err != nil { 1929 t.Fatalf("Error creating client: %v", err) 1930 } 1931 defer nc1.Close() 1932 closed := make(chan struct{}, 1) 1933 nc1.SetDisconnectHandler(func(*nats.Conn) { 1934 closed <- struct{}{} 1935 }) 1936 nc2, err := nats.Connect(addr) 1937 if err != nil { 1938 t.Fatalf("Error creating client: %v", err) 1939 } 1940 defer nc2.Close() 1941 nc2.SetDisconnectHandler(func(*nats.Conn) { 1942 closed <- struct{}{} 1943 }) 1944 1945 if numClients := server.NumClients(); numClients != 2 { 1946 t.Fatalf("Expected 2 clients, got %d", numClients) 1947 } 1948 1949 // Set max connections to one. 1950 changeCurrentConfigContent(t, config, "./configs/reload/max_connections.conf") 1951 if err := server.Reload(); err != nil { 1952 t.Fatalf("Error reloading config: %v", err) 1953 } 1954 1955 // Ensure one connection was closed. 1956 select { 1957 case <-closed: 1958 case <-time.After(5 * time.Second): 1959 t.Fatal("Expected to be disconnected") 1960 } 1961 1962 checkClientsCount(t, server, 1) 1963 1964 // Ensure new connections fail. 1965 _, err = nats.Connect(addr) 1966 if err == nil { 1967 t.Fatal("Expected error on connect") 1968 } 1969 } 1970 1971 // Ensure reload supports changing the max payload size. Test this by starting 1972 // a server with the default size limit, ensuring publishes work, reloading 1973 // with a restrictive limit, and ensuring publishing an oversized message fails 1974 // and disconnects the client. 1975 func TestConfigReloadMaxPayload(t *testing.T) { 1976 server, opts, config := runReloadServerWithConfig(t, "./configs/reload/basic.conf") 1977 defer server.Shutdown() 1978 1979 addr := fmt.Sprintf("nats://%s:%d", opts.Host, server.Addr().(*net.TCPAddr).Port) 1980 nc, err := nats.Connect(addr) 1981 if err != nil { 1982 t.Fatalf("Error creating client: %v", err) 1983 } 1984 defer nc.Close() 1985 closed := make(chan struct{}) 1986 nc.SetDisconnectHandler(func(*nats.Conn) { 1987 closed <- struct{}{} 1988 }) 1989 1990 conn, err := nats.Connect(addr) 1991 if err != nil { 1992 t.Fatalf("Error creating client: %v", err) 1993 } 1994 defer conn.Close() 1995 sub, err := conn.SubscribeSync("foo") 1996 if err != nil { 1997 t.Fatalf("Error subscribing: %v", err) 1998 } 1999 conn.Flush() 2000 2001 // Ensure we can publish as a sanity check. 2002 if err := nc.Publish("foo", []byte("hello")); err != nil { 2003 t.Fatalf("Error publishing: %v", err) 2004 } 2005 nc.Flush() 2006 _, err = sub.NextMsg(2 * time.Second) 2007 if err != nil { 2008 t.Fatalf("Error receiving message: %v", err) 2009 } 2010 2011 // Set max payload to one. 2012 changeCurrentConfigContent(t, config, "./configs/reload/max_payload.conf") 2013 if err := server.Reload(); err != nil { 2014 t.Fatalf("Error reloading config: %v", err) 2015 } 2016 2017 // Ensure oversized messages don't get delivered and the client is 2018 // disconnected. 2019 if err := nc.Publish("foo", []byte("hello")); err != nil { 2020 t.Fatalf("Error publishing: %v", err) 2021 } 2022 nc.Flush() 2023 _, err = sub.NextMsg(20 * time.Millisecond) 2024 if err != nats.ErrTimeout { 2025 t.Fatalf("Expected ErrTimeout, got: %v", err) 2026 } 2027 2028 select { 2029 case <-closed: 2030 case <-time.After(5 * time.Second): 2031 t.Fatal("Expected to be disconnected") 2032 } 2033 } 2034 2035 // Ensure reload supports rotating out files. 
Test this by starting 2036 // a server with log and pid files, reloading new ones, then check that 2037 // we can rename and delete the old log/pid files. 2038 func TestConfigReloadRotateFiles(t *testing.T) { 2039 server, _, config := runReloadServerWithConfig(t, "./configs/reload/file_rotate.conf") 2040 defer func() { 2041 removeFile(t, "log1.txt") 2042 removeFile(t, "nats-server1.pid") 2043 }() 2044 defer server.Shutdown() 2045 2046 // Configure the logger to enable actual logging 2047 opts := server.getOpts() 2048 opts.NoLog = false 2049 server.ConfigureLogger() 2050 2051 // Load a config that renames the files. 2052 changeCurrentConfigContent(t, config, "./configs/reload/file_rotate1.conf") 2053 if err := server.Reload(); err != nil { 2054 t.Fatalf("Error reloading config: %v", err) 2055 } 2056 2057 // Make sure the new files exist. 2058 if _, err := os.Stat("log1.txt"); os.IsNotExist(err) { 2059 t.Fatalf("Error reloading config, no new file: %v", err) 2060 } 2061 if _, err := os.Stat("nats-server1.pid"); os.IsNotExist(err) { 2062 t.Fatalf("Error reloading config, no new file: %v", err) 2063 } 2064 2065 // Check that old file can be renamed. 2066 if err := os.Rename("log.txt", "log_old.txt"); err != nil { 2067 t.Fatalf("Error reloading config, cannot rename file: %v", err) 2068 } 2069 if err := os.Rename("nats-server.pid", "nats-server_old.pid"); err != nil { 2070 t.Fatalf("Error reloading config, cannot rename file: %v", err) 2071 } 2072 2073 // Check that the old files can be removed after rename. 2074 removeFile(t, "log_old.txt") 2075 removeFile(t, "nats-server_old.pid") 2076 } 2077 2078 func TestConfigReloadClusterWorks(t *testing.T) { 2079 confBTemplate := ` 2080 listen: -1 2081 cluster: { 2082 listen: 127.0.0.1:7244 2083 authorization { 2084 user: ruser 2085 password: pwd 2086 timeout: %d 2087 } 2088 routes = [ 2089 nats-route://ruser:pwd@127.0.0.1:7246 2090 ] 2091 }` 2092 confB := createConfFile(t, []byte(fmt.Sprintf(confBTemplate, 3))) 2093 2094 confATemplate := ` 2095 listen: -1 2096 cluster: { 2097 listen: 127.0.0.1:7246 2098 authorization { 2099 user: ruser 2100 password: pwd 2101 timeout: %d 2102 } 2103 routes = [ 2104 nats-route://ruser:pwd@127.0.0.1:7244 2105 ] 2106 }` 2107 confA := createConfFile(t, []byte(fmt.Sprintf(confATemplate, 3))) 2108 2109 srvb, _ := RunServerWithConfig(confB) 2110 defer srvb.Shutdown() 2111 2112 srva, _ := RunServerWithConfig(confA) 2113 defer srva.Shutdown() 2114 2115 // Wait for the cluster to form and capture the connection IDs of each route 2116 checkClusterFormed(t, srva, srvb) 2117 2118 getCID := func(s *Server) uint64 { 2119 s.mu.Lock() 2120 defer s.mu.Unlock() 2121 if r := getFirstRoute(s); r != nil { 2122 return r.cid 2123 } 2124 return 0 2125 } 2126 acid := getCID(srva) 2127 bcid := getCID(srvb) 2128 2129 // Update auth timeout to force a check of the connected route auth 2130 reloadUpdateConfig(t, srvb, confB, fmt.Sprintf(confBTemplate, 5)) 2131 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, 5)) 2132 2133 // Wait a little bit to ensure that there is no issue with connection 2134 // breaking at this point (this was an issue before). 
2135 time.Sleep(100 * time.Millisecond) 2136 2137 // Cluster should still exist 2138 checkClusterFormed(t, srva, srvb) 2139 2140 // Check that routes were not re-created 2141 newacid := getCID(srva) 2142 newbcid := getCID(srvb) 2143 2144 if newacid != acid { 2145 t.Fatalf("Expected server A route ID to be %v, got %v", acid, newacid) 2146 } 2147 if newbcid != bcid { 2148 t.Fatalf("Expected server B route ID to be %v, got %v", bcid, newbcid) 2149 } 2150 } 2151 2152 func TestConfigReloadClusterPerms(t *testing.T) { 2153 confATemplate := ` 2154 port: -1 2155 cluster { 2156 listen: 127.0.0.1:-1 2157 permissions { 2158 import { 2159 allow: %s 2160 } 2161 export { 2162 allow: %s 2163 } 2164 } 2165 } 2166 no_sys_acc: true 2167 ` 2168 confA := createConfFile(t, []byte(fmt.Sprintf(confATemplate, `"foo"`, `"foo"`))) 2169 srva, _ := RunServerWithConfig(confA) 2170 defer srva.Shutdown() 2171 2172 confBTemplate := ` 2173 port: -1 2174 cluster { 2175 listen: 127.0.0.1:-1 2176 permissions { 2177 import { 2178 allow: %s 2179 } 2180 export { 2181 allow: %s 2182 } 2183 } 2184 routes = [ 2185 "nats://127.0.0.1:%d" 2186 ] 2187 } 2188 no_sys_acc: true 2189 ` 2190 confB := createConfFile(t, []byte(fmt.Sprintf(confBTemplate, `"foo"`, `"foo"`, srva.ClusterAddr().Port))) 2191 srvb, _ := RunServerWithConfig(confB) 2192 defer srvb.Shutdown() 2193 2194 checkClusterFormed(t, srva, srvb) 2195 2196 // Create a connection on A 2197 nca, err := nats.Connect(fmt.Sprintf("nats://127.0.0.1:%d", srva.Addr().(*net.TCPAddr).Port)) 2198 if err != nil { 2199 t.Fatalf("Error on connect: %v", err) 2200 } 2201 defer nca.Close() 2202 // Create a subscription on "foo" and "bar", only "foo" will be also on server B. 2203 subFooOnA, err := nca.SubscribeSync("foo") 2204 if err != nil { 2205 t.Fatalf("Error on subscribe: %v", err) 2206 } 2207 subBarOnA, err := nca.SubscribeSync("bar") 2208 if err != nil { 2209 t.Fatalf("Error on subscribe: %v", err) 2210 } 2211 2212 // Connect on B and do the same 2213 ncb, err := nats.Connect(fmt.Sprintf("nats://127.0.0.1:%d", srvb.Addr().(*net.TCPAddr).Port)) 2214 if err != nil { 2215 t.Fatalf("Error on connect: %v", err) 2216 } 2217 defer ncb.Close() 2218 // Create a subscription on "foo" and "bar", only "foo" will be also on server B. 2219 subFooOnB, err := ncb.SubscribeSync("foo") 2220 if err != nil { 2221 t.Fatalf("Error on subscribe: %v", err) 2222 } 2223 subBarOnB, err := ncb.SubscribeSync("bar") 2224 if err != nil { 2225 t.Fatalf("Error on subscribe: %v", err) 2226 } 2227 2228 // Check subscriptions on each server. There should be 3 on each server, 2229 // foo and bar locally and foo from remote server. 
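	// Put differently: with both import and export restricted to "foo", only the
	// "foo" interest crosses the route, so each side counts
	//
	//     foo (local) + bar (local) + foo (remote) = 3 subs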
2230 checkExpectedSubs(t, 3, srva, srvb) 2231 2232 sendMsg := func(t *testing.T, subj string, nc *nats.Conn) { 2233 t.Helper() 2234 if err := nc.Publish(subj, []byte("msg")); err != nil { 2235 t.Fatalf("Error on publish: %v", err) 2236 } 2237 } 2238 2239 checkSub := func(t *testing.T, sub *nats.Subscription, shouldReceive bool) { 2240 t.Helper() 2241 _, err := sub.NextMsg(100 * time.Millisecond) 2242 if shouldReceive && err != nil { 2243 t.Fatalf("Expected message on %q, got %v", sub.Subject, err) 2244 } else if !shouldReceive && err == nil { 2245 t.Fatalf("Expected no message on %q, got one", sub.Subject) 2246 } 2247 } 2248 2249 // Produce from A and check received on both sides 2250 sendMsg(t, "foo", nca) 2251 checkSub(t, subFooOnA, true) 2252 checkSub(t, subFooOnB, true) 2253 // Now from B: 2254 sendMsg(t, "foo", ncb) 2255 checkSub(t, subFooOnA, true) 2256 checkSub(t, subFooOnB, true) 2257 2258 // Publish on bar from A and make sure only local sub receives 2259 sendMsg(t, "bar", nca) 2260 checkSub(t, subBarOnA, true) 2261 checkSub(t, subBarOnB, false) 2262 2263 // Publish on bar from B and make sure only local sub receives 2264 sendMsg(t, "bar", ncb) 2265 checkSub(t, subBarOnA, false) 2266 checkSub(t, subBarOnB, true) 2267 2268 // We will now both import/export foo and bar. Start with reloading A. 2269 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, `["foo", "bar"]`, `["foo", "bar"]`)) 2270 2271 // Since B has not been updated yet, the state should remain the same, 2272 // that is 3 subs on each server. 2273 checkExpectedSubs(t, 3, srva, srvb) 2274 2275 // Now update and reload B. Add "baz" for another test down below 2276 reloadUpdateConfig(t, srvb, confB, fmt.Sprintf(confBTemplate, `["foo", "bar", "baz"]`, `["foo", "bar", "baz"]`, srva.ClusterAddr().Port)) 2277 2278 // Now 4 on each server 2279 checkExpectedSubs(t, 4, srva, srvb) 2280 2281 // Make sure that we can receive all messages 2282 sendMsg(t, "foo", nca) 2283 checkSub(t, subFooOnA, true) 2284 checkSub(t, subFooOnB, true) 2285 sendMsg(t, "foo", ncb) 2286 checkSub(t, subFooOnA, true) 2287 checkSub(t, subFooOnB, true) 2288 2289 sendMsg(t, "bar", nca) 2290 checkSub(t, subBarOnA, true) 2291 checkSub(t, subBarOnB, true) 2292 sendMsg(t, "bar", ncb) 2293 checkSub(t, subBarOnA, true) 2294 checkSub(t, subBarOnB, true) 2295 2296 // Create subscription on baz on server B. 2297 subBazOnB, err := ncb.SubscribeSync("baz") 2298 if err != nil { 2299 t.Fatalf("Error on subscribe: %v", err) 2300 } 2301 // Check subscriptions count 2302 checkExpectedSubs(t, 5, srvb) 2303 checkExpectedSubs(t, 4, srva) 2304 2305 sendMsg(t, "baz", nca) 2306 checkSub(t, subBazOnB, false) 2307 sendMsg(t, "baz", ncb) 2308 checkSub(t, subBazOnB, true) 2309 2310 // Test UNSUB by denying something that was previously imported 2311 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, `"foo"`, `["foo", "bar"]`)) 2312 // Since A no longer imports "bar", we should have one less subscription 2313 // on B (B will have received an UNSUB for bar) 2314 checkExpectedSubs(t, 4, srvb) 2315 // A, however, should still have same number of subs. 2316 checkExpectedSubs(t, 4, srva) 2317 2318 // Remove all permissions from A. 
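	// Dropping the permissions block entirely means route traffic is unrestricted
	// again, so the "baz" interest that so far only existed on B should now
	// propagate to A as well, which is why both servers are expected to report
	// 5 subscriptions below.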
2319 reloadUpdateConfig(t, srva, confA, ` 2320 port: -1 2321 cluster { 2322 listen: 127.0.0.1:-1 2323 } 2324 no_sys_acc: true 2325 `) 2326 // Server A should now have baz sub 2327 checkExpectedSubs(t, 5, srvb) 2328 checkExpectedSubs(t, 5, srva) 2329 2330 sendMsg(t, "baz", nca) 2331 checkSub(t, subBazOnB, true) 2332 sendMsg(t, "baz", ncb) 2333 checkSub(t, subBazOnB, true) 2334 2335 // Finally, remove permissions from B 2336 reloadUpdateConfig(t, srvb, confB, fmt.Sprintf(` 2337 port: -1 2338 cluster { 2339 listen: 127.0.0.1:-1 2340 routes = [ 2341 "nats://127.0.0.1:%d" 2342 ] 2343 } 2344 no_sys_acc: true 2345 `, srva.ClusterAddr().Port)) 2346 // Check expected subscriptions count. 2347 checkExpectedSubs(t, 5, srvb) 2348 checkExpectedSubs(t, 5, srva) 2349 } 2350 2351 func TestConfigReloadClusterPermsImport(t *testing.T) { 2352 confATemplate := ` 2353 port: -1 2354 cluster { 2355 listen: 127.0.0.1:-1 2356 permissions { 2357 import: { 2358 allow: %s 2359 } 2360 } 2361 } 2362 no_sys_acc: true 2363 ` 2364 confA := createConfFile(t, []byte(fmt.Sprintf(confATemplate, `["foo", "bar"]`))) 2365 srva, _ := RunServerWithConfig(confA) 2366 defer srva.Shutdown() 2367 2368 confBTemplate := ` 2369 port: -1 2370 cluster { 2371 listen: 127.0.0.1:-1 2372 routes = [ 2373 "nats://127.0.0.1:%d" 2374 ] 2375 } 2376 no_sys_acc: true 2377 ` 2378 confB := createConfFile(t, []byte(fmt.Sprintf(confBTemplate, srva.ClusterAddr().Port))) 2379 srvb, _ := RunServerWithConfig(confB) 2380 defer srvb.Shutdown() 2381 2382 checkClusterFormed(t, srva, srvb) 2383 2384 // Create a connection on A 2385 nca, err := nats.Connect(fmt.Sprintf("nats://127.0.0.1:%d", srva.Addr().(*net.TCPAddr).Port)) 2386 if err != nil { 2387 t.Fatalf("Error on connect: %v", err) 2388 } 2389 defer nca.Close() 2390 // Create a subscription on "foo" and "bar" 2391 if _, err := nca.SubscribeSync("foo"); err != nil { 2392 t.Fatalf("Error on subscribe: %v", err) 2393 } 2394 if _, err := nca.SubscribeSync("bar"); err != nil { 2395 t.Fatalf("Error on subscribe: %v", err) 2396 } 2397 2398 checkExpectedSubs(t, 2, srva, srvb) 2399 2400 // Drop foo 2401 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, `"bar"`)) 2402 checkExpectedSubs(t, 2, srva) 2403 checkExpectedSubs(t, 1, srvb) 2404 2405 // Add it back 2406 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, `["foo", "bar"]`)) 2407 checkExpectedSubs(t, 2, srva, srvb) 2408 2409 // Empty Import means implicit allow 2410 reloadUpdateConfig(t, srva, confA, ` 2411 port: -1 2412 cluster { 2413 listen: 127.0.0.1:-1 2414 permissions { 2415 export: ">" 2416 } 2417 } 2418 no_sys_acc: true 2419 `) 2420 checkExpectedSubs(t, 2, srva, srvb) 2421 2422 confATemplate = ` 2423 port: -1 2424 cluster { 2425 listen: 127.0.0.1:-1 2426 permissions { 2427 import: { 2428 allow: ["foo", "bar"] 2429 deny: %s 2430 } 2431 } 2432 } 2433 no_sys_acc: true 2434 ` 2435 // Now deny all: 2436 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, `["foo", "bar"]`)) 2437 checkExpectedSubs(t, 2, srva) 2438 checkExpectedSubs(t, 0, srvb) 2439 2440 // Drop foo from the deny list 2441 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, `"bar"`)) 2442 checkExpectedSubs(t, 2, srva) 2443 checkExpectedSubs(t, 1, srvb) 2444 } 2445 2446 func TestConfigReloadClusterPermsExport(t *testing.T) { 2447 confATemplate := ` 2448 port: -1 2449 cluster { 2450 listen: 127.0.0.1:-1 2451 permissions { 2452 export: { 2453 allow: %s 2454 } 2455 } 2456 } 2457 no_sys_acc: true 2458 ` 2459 confA := createConfFile(t, 
[]byte(fmt.Sprintf(confATemplate, `["foo", "bar"]`))) 2460 srva, _ := RunServerWithConfig(confA) 2461 defer srva.Shutdown() 2462 2463 confBTemplate := ` 2464 port: -1 2465 cluster { 2466 listen: 127.0.0.1:-1 2467 routes = [ 2468 "nats://127.0.0.1:%d" 2469 ] 2470 } 2471 no_sys_acc: true 2472 ` 2473 confB := createConfFile(t, []byte(fmt.Sprintf(confBTemplate, srva.ClusterAddr().Port))) 2474 srvb, _ := RunServerWithConfig(confB) 2475 defer srvb.Shutdown() 2476 2477 checkClusterFormed(t, srva, srvb) 2478 2479 // Create a connection on B 2480 ncb, err := nats.Connect(fmt.Sprintf("nats://127.0.0.1:%d", srvb.Addr().(*net.TCPAddr).Port)) 2481 if err != nil { 2482 t.Fatalf("Error on connect: %v", err) 2483 } 2484 defer ncb.Close() 2485 // Create a subscription on "foo" and "bar" 2486 if _, err := ncb.SubscribeSync("foo"); err != nil { 2487 t.Fatalf("Error on subscribe: %v", err) 2488 } 2489 if _, err := ncb.SubscribeSync("bar"); err != nil { 2490 t.Fatalf("Error on subscribe: %v", err) 2491 } 2492 2493 checkExpectedSubs(t, 2, srva, srvb) 2494 2495 // Drop foo 2496 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, `"bar"`)) 2497 checkExpectedSubs(t, 2, srvb) 2498 checkExpectedSubs(t, 1, srva) 2499 2500 // Add it back 2501 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, `["foo", "bar"]`)) 2502 checkExpectedSubs(t, 2, srva, srvb) 2503 2504 // Empty Export means implicit allow 2505 reloadUpdateConfig(t, srva, confA, ` 2506 port: -1 2507 cluster { 2508 listen: 127.0.0.1:-1 2509 permissions { 2510 import: ">" 2511 } 2512 } 2513 no_sys_acc: true 2514 `) 2515 checkExpectedSubs(t, 2, srva, srvb) 2516 2517 confATemplate = ` 2518 port: -1 2519 cluster { 2520 listen: 127.0.0.1:-1 2521 permissions { 2522 export: { 2523 allow: ["foo", "bar"] 2524 deny: %s 2525 } 2526 } 2527 } 2528 no_sys_acc: true 2529 ` 2530 // Now deny all: 2531 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, `["foo", "bar"]`)) 2532 checkExpectedSubs(t, 0, srva) 2533 checkExpectedSubs(t, 2, srvb) 2534 2535 // Drop foo from the deny list 2536 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, `"bar"`)) 2537 checkExpectedSubs(t, 1, srva) 2538 checkExpectedSubs(t, 2, srvb) 2539 } 2540 2541 func TestConfigReloadClusterPermsOldServer(t *testing.T) { 2542 confATemplate := ` 2543 port: -1 2544 cluster { 2545 listen: 127.0.0.1:-1 2546 permissions { 2547 export: { 2548 allow: %s 2549 } 2550 } 2551 } 2552 ` 2553 confA := createConfFile(t, []byte(fmt.Sprintf(confATemplate, `["foo", "bar"]`))) 2554 srva, _ := RunServerWithConfig(confA) 2555 defer srva.Shutdown() 2556 2557 optsB := DefaultOptions() 2558 optsB.Routes = RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", srva.ClusterAddr().Port)) 2559 // Make server B behave like an old server 2560 optsB.overrideProto = setServerProtoForTest(RouteProtoZero) 2561 srvb := RunServer(optsB) 2562 defer srvb.Shutdown() 2563 2564 checkClusterFormed(t, srva, srvb) 2565 2566 // Get the route's connection ID 2567 getRouteRID := func() uint64 { 2568 rid := uint64(0) 2569 srvb.mu.Lock() 2570 if r := getFirstRoute(srvb); r != nil { 2571 r.mu.Lock() 2572 rid = r.cid 2573 r.mu.Unlock() 2574 } 2575 srvb.mu.Unlock() 2576 return rid 2577 } 2578 orgRID := getRouteRID() 2579 2580 // Cause a config reload on A 2581 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, `"bar"`)) 2582 2583 // Check that new route gets created 2584 check := func(t *testing.T) { 2585 t.Helper() 2586 checkFor(t, 3*time.Second, 15*time.Millisecond, func() error { 2587 if rid := getRouteRID(); rid 
== orgRID { 2588 return fmt.Errorf("Route does not seem to have been recreated") 2589 } 2590 return nil 2591 }) 2592 } 2593 check(t) 2594 2595 // Save the current value 2596 orgRID = getRouteRID() 2597 2598 // Add another server that supports INFO updates 2599 2600 optsC := DefaultOptions() 2601 optsC.Routes = RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", srva.ClusterAddr().Port)) 2602 srvc := RunServer(optsC) 2603 defer srvc.Shutdown() 2604 2605 checkClusterFormed(t, srva, srvb, srvc) 2606 2607 // Cause a config reload on A 2608 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, `"foo"`)) 2609 // Check that new route gets created 2610 check(t) 2611 } 2612 2613 func TestConfigReloadAccountUsers(t *testing.T) { 2614 conf := createConfFile(t, []byte(` 2615 listen: "127.0.0.1:-1" 2616 accounts { 2617 synadia { 2618 users = [ 2619 {user: derek, password: derek} 2620 {user: foo, password: foo} 2621 ] 2622 } 2623 nats.io { 2624 users = [ 2625 {user: ivan, password: ivan} 2626 {user: bar, password: bar} 2627 ] 2628 } 2629 acc_deleted_after_reload { 2630 users = [ 2631 {user: gone, password: soon} 2632 {user: baz, password: baz} 2633 {user: bat, password: bat} 2634 ] 2635 } 2636 } 2637 `)) 2638 s, opts := RunServerWithConfig(conf) 2639 defer s.Shutdown() 2640 2641 // Connect as exisiting users, should work. 2642 nc, err := nats.Connect(fmt.Sprintf("nats://derek:derek@%s:%d", opts.Host, opts.Port)) 2643 if err != nil { 2644 t.Fatalf("Error on connect: %v", err) 2645 } 2646 defer nc.Close() 2647 ch := make(chan bool, 2) 2648 cb := func(_ *nats.Conn) { 2649 ch <- true 2650 } 2651 nc2, err := nats.Connect( 2652 fmt.Sprintf("nats://ivan:ivan@%s:%d", opts.Host, opts.Port), 2653 nats.NoReconnect(), 2654 nats.ClosedHandler(cb)) 2655 if err != nil { 2656 t.Fatalf("Error on connect: %v", err) 2657 } 2658 defer nc2.Close() 2659 nc3, err := nats.Connect( 2660 fmt.Sprintf("nats://gone:soon@%s:%d", opts.Host, opts.Port), 2661 nats.NoReconnect(), 2662 nats.ClosedHandler(cb)) 2663 if err != nil { 2664 t.Fatalf("Error on connect: %v", err) 2665 } 2666 defer nc3.Close() 2667 // These users will be moved from an account to another (to a specific or to global account) 2668 // We will create subscriptions to ensure that they are moved to proper sublists too. 
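	// For orientation, the reload further down shuffles users roughly as follows
	// (derived from the two config bodies in this test):
	//
	//     derek: synadia                  -> synadia        (unchanged)
	//     foo:   synadia                  -> global account (top-level authorization)
	//     ivan:  nats.io                  -> removed        (connection closed)
	//     bar:   nats.io                  -> synadia
	//     gone:  acc_deleted_after_reload -> removed        (connection closed)
	//     baz:   acc_deleted_after_reload -> global account
	//     bat:   acc_deleted_after_reload -> nats.io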
2669 rch := make(chan bool, 4) 2670 rcb := func(_ *nats.Conn) { 2671 rch <- true 2672 } 2673 nc4, err := nats.Connect(fmt.Sprintf("nats://foo:foo@%s:%d", opts.Host, opts.Port), 2674 nats.ReconnectWait(50*time.Millisecond), nats.ReconnectHandler(rcb)) 2675 if err != nil { 2676 t.Fatalf("Error on connect: %v", err) 2677 } 2678 defer nc4.Close() 2679 if _, err := nc4.SubscribeSync("foo"); err != nil { 2680 t.Fatalf("Error on subscribe: %v", err) 2681 } 2682 nc5, err := nats.Connect(fmt.Sprintf("nats://bar:bar@%s:%d", opts.Host, opts.Port), 2683 nats.ReconnectWait(50*time.Millisecond), nats.ReconnectHandler(rcb)) 2684 if err != nil { 2685 t.Fatalf("Error on connect: %v", err) 2686 } 2687 defer nc5.Close() 2688 if _, err := nc5.SubscribeSync("bar"); err != nil { 2689 t.Fatalf("Error on subscribe: %v", err) 2690 } 2691 nc6, err := nats.Connect(fmt.Sprintf("nats://baz:baz@%s:%d", opts.Host, opts.Port), 2692 nats.ReconnectWait(50*time.Millisecond), nats.ReconnectHandler(rcb)) 2693 if err != nil { 2694 t.Fatalf("Error on connect: %v", err) 2695 } 2696 defer nc6.Close() 2697 if _, err := nc6.SubscribeSync("baz"); err != nil { 2698 t.Fatalf("Error on subscribe: %v", err) 2699 } 2700 nc7, err := nats.Connect(fmt.Sprintf("nats://bat:bat@%s:%d", opts.Host, opts.Port), 2701 nats.ReconnectWait(50*time.Millisecond), nats.ReconnectHandler(rcb)) 2702 if err != nil { 2703 t.Fatalf("Error on connect: %v", err) 2704 } 2705 defer nc7.Close() 2706 if _, err := nc7.SubscribeSync("bat"); err != nil { 2707 t.Fatalf("Error on subscribe: %v", err) 2708 } 2709 2710 // confirm subscriptions before and after reload. 2711 var expectedSubs uint32 = 5 2712 sAcc, err := s.LookupAccount("synadia") 2713 require_NoError(t, err) 2714 sAcc.mu.RLock() 2715 n := sAcc.sl.Count() 2716 sAcc.mu.RUnlock() 2717 if n != expectedSubs { 2718 t.Errorf("Synadia account should have %d sub, got %v", expectedSubs, n) 2719 } 2720 nAcc, err := s.LookupAccount("nats.io") 2721 require_NoError(t, err) 2722 nAcc.mu.RLock() 2723 n = nAcc.sl.Count() 2724 nAcc.mu.RUnlock() 2725 if n != expectedSubs { 2726 t.Errorf("Nats.io account should have %d sub, got %v", expectedSubs, n) 2727 } 2728 2729 // Remove user from account and whole account 2730 reloadUpdateConfig(t, s, conf, ` 2731 listen: "127.0.0.1:-1" 2732 authorization { 2733 users = [ 2734 {user: foo, password: foo} 2735 {user: baz, password: baz} 2736 ] 2737 } 2738 accounts { 2739 synadia { 2740 users = [ 2741 {user: derek, password: derek} 2742 {user: bar, password: bar} 2743 ] 2744 } 2745 nats.io { 2746 users = [ 2747 {user: bat, password: bat} 2748 ] 2749 } 2750 } 2751 `) 2752 // nc2 and nc3 should be closed 2753 if err := wait(ch); err != nil { 2754 t.Fatal("Did not get the closed callback") 2755 } 2756 if err := wait(ch); err != nil { 2757 t.Fatal("Did not get the closed callback") 2758 } 2759 // And first connection should still be connected 2760 if !nc.IsConnected() { 2761 t.Fatal("First connection should still be connected") 2762 } 2763 2764 // Old account should be gone 2765 if _, err := s.LookupAccount("acc_deleted_after_reload"); err == nil { 2766 t.Fatal("old account should be gone") 2767 } 2768 2769 // Check subscriptions. 
Since most of the users have been
2770 	// moved to different accounts, make sure we account for the reconnects.
2771 	for i := 0; i < 4; i++ {
2772 		if err := wait(rch); err != nil {
2773 			t.Fatal("Did not get the reconnect cb")
2774 		}
2775 	}
2776 	// Still need to do the tests in a checkFor() because clients
2777 	// being reconnected does not mean that resent of subscriptions
2778 	// has already been processed.
2779 	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
2780 		gAcc, err := s.LookupAccount(globalAccountName)
2781 		require_NoError(t, err)
2782 		gAcc.mu.RLock()
2783 		n := gAcc.sl.Count()
2784 		fooMatch := gAcc.sl.Match("foo")
2785 		bazMatch := gAcc.sl.Match("baz")
2786 		gAcc.mu.RUnlock()
2787 		// The number of subscriptions should be 4 ($SYS.REQ.USER.INFO,
2788 		// $SYS.REQ.ACCOUNT.PING.CONNZ, $SYS.REQ.ACCOUNT.PING.STATZ,
2789 		// $SYS.REQ.SERVER.PING.CONNZ) + 2 (foo and baz)
2790 		if n != 6 {
2791 			return fmt.Errorf("Global account should have 6 subs, got %v", n)
2792 		}
2793 		if len(fooMatch.psubs) != 1 {
2794 			return fmt.Errorf("Global account should have foo sub")
2795 		}
2796 		if len(bazMatch.psubs) != 1 {
2797 			return fmt.Errorf("Global account should have baz sub")
2798 		}
2799 
2800 		sAcc, err := s.LookupAccount("synadia")
2801 		require_NoError(t, err)
2802 		sAcc.mu.RLock()
2803 		n = sAcc.sl.Count()
2804 		barMatch := sAcc.sl.Match("bar")
2805 
2806 		sAcc.mu.RUnlock()
2807 		if n != expectedSubs {
2808 			return fmt.Errorf("Synadia account should have %d subs, got %v", expectedSubs, n)
2809 		}
2810 		if len(barMatch.psubs) != 1 {
2811 			return fmt.Errorf("Synadia account should have bar sub")
2812 		}
2813 
2814 		nAcc, err := s.LookupAccount("nats.io")
2815 		require_NoError(t, err)
2816 		nAcc.mu.RLock()
2817 		n = nAcc.sl.Count()
2818 		batMatch := nAcc.sl.Match("bat")
2819 		nAcc.mu.RUnlock()
2820 		if n != expectedSubs {
2821 			return fmt.Errorf("Nats.io account should have %d subs, got %v", expectedSubs, n)
2822 		}
2823 		if len(batMatch.psubs) != 1 {
2824 			return fmt.Errorf("Nats.io account should have bat sub")
2825 		}
2826 		return nil
2827 	})
2828 }
2829 
2830 func TestConfigReloadAccountWithNoChanges(t *testing.T) {
2831 	conf := createConfFile(t, []byte(`
2832 	listen: "127.0.0.1:-1"
2833 	system_account: sys
2834 	accounts {
2835 		A {
2836 			users = [{ user: a }]
2837 		}
2838 		B {
2839 			users = [{ user: b }]
2840 		}
2841 		C {
2842 			users = [{ user: c }]
2843 		}
2844 		sys {
2845 			users = [{ user: sys }]
2846 		}
2847 	}
2848 	`))
2849 	s, opts := RunServerWithConfig(conf)
2850 	defer s.Shutdown()
2851 
2852 	ncA, err := nats.Connect(fmt.Sprintf("nats://a:@%s:%d", opts.Host, opts.Port))
2853 	if err != nil {
2854 		t.Fatalf("Error on connect: %v", err)
2855 	}
2856 	defer ncA.Close()
2857 
2858 	// Confirm default service imports are ok.
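	// The checks below exercise the default system service imports, i.e. requests on
	//
	//     $SYS.REQ.ACCOUNT.PING.CONNZ
	//     $SYS.REQ.SERVER.PING.CONNZ
	//     $SYS.REQ.ACCOUNT.PING.STATZ
	//
	// each of which should answer for the single connection on account A.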
2859 checkSubs := func(t *testing.T) { 2860 resp, err := ncA.Request("$SYS.REQ.ACCOUNT.PING.CONNZ", nil, time.Second) 2861 if err != nil { 2862 t.Error(err) 2863 } 2864 if resp == nil || !strings.Contains(string(resp.Data), `"num_connections":1`) { 2865 t.Fatal("unexpected data in connz response") 2866 } 2867 resp, err = ncA.Request("$SYS.REQ.SERVER.PING.CONNZ", nil, time.Second) 2868 if err != nil { 2869 t.Error(err) 2870 } 2871 if resp == nil || !strings.Contains(string(resp.Data), `"num_connections":1`) { 2872 t.Fatal("unexpected data in connz response") 2873 } 2874 resp, err = ncA.Request("$SYS.REQ.ACCOUNT.PING.STATZ", nil, time.Second) 2875 if err != nil { 2876 t.Error(err) 2877 } 2878 if resp == nil || !strings.Contains(string(resp.Data), `"conns":1`) { 2879 t.Fatal("unexpected data in connz response") 2880 } 2881 } 2882 checkSubs(t) 2883 before := s.NumSubscriptions() 2884 s.Reload() 2885 after := s.NumSubscriptions() 2886 if before != after { 2887 t.Errorf("Number of subscriptions changed after reload: %d -> %d", before, after) 2888 } 2889 2890 // Confirm this still works after a reload... 2891 checkSubs(t) 2892 before = s.NumSubscriptions() 2893 s.Reload() 2894 after = s.NumSubscriptions() 2895 if before != after { 2896 t.Errorf("Number of subscriptions changed after reload: %d -> %d", before, after) 2897 } 2898 2899 // Do another extra reload just in case. 2900 checkSubs(t) 2901 before = s.NumSubscriptions() 2902 s.Reload() 2903 after = s.NumSubscriptions() 2904 if before != after { 2905 t.Errorf("Number of subscriptions changed after reload: %d -> %d", before, after) 2906 } 2907 } 2908 2909 func TestConfigReloadAccountNKeyUsers(t *testing.T) { 2910 conf := createConfFile(t, []byte(` 2911 listen: "127.0.0.1:-1" 2912 accounts { 2913 synadia { 2914 users = [ 2915 # Derek 2916 {nkey : UCNGL4W5QX66CFX6A6DCBVDH5VOHMI7B2UZZU7TXAUQQSI2JPHULCKBR} 2917 ] 2918 } 2919 nats.io { 2920 users = [ 2921 # Ivan 2922 {nkey : UDPGQVFIWZ7Q5UH4I5E6DBCZULQS6VTVBG6CYBD7JV3G3N2GMQOMNAUH} 2923 ] 2924 } 2925 } 2926 `)) 2927 s, _ := RunServerWithConfig(conf) 2928 defer s.Shutdown() 2929 2930 synadia, _ := s.LookupAccount("synadia") 2931 nats, _ := s.LookupAccount("nats.io") 2932 2933 seed1 := []byte("SUAPM67TC4RHQLKBX55NIQXSMATZDOZK6FNEOSS36CAYA7F7TY66LP4BOM") 2934 seed2 := []byte("SUAIS5JPX4X4GJ7EIIJEQ56DH2GWPYJRPWN5XJEDENJOZHCBLI7SEPUQDE") 2935 2936 kp, _ := nkeys.FromSeed(seed1) 2937 pubKey, _ := kp.PublicKey() 2938 2939 c, cr, l := newClientForServer(s) 2940 defer c.close() 2941 // Check for Nonce 2942 var info nonceInfo 2943 if err := json.Unmarshal([]byte(l[5:]), &info); err != nil { 2944 t.Fatalf("Could not parse INFO json: %v\n", err) 2945 } 2946 if info.Nonce == "" { 2947 t.Fatalf("Expected a non-empty nonce with nkeys defined") 2948 } 2949 sigraw, err := kp.Sign([]byte(info.Nonce)) 2950 if err != nil { 2951 t.Fatalf("Failed signing nonce: %v", err) 2952 } 2953 sig := base64.RawURLEncoding.EncodeToString(sigraw) 2954 2955 // PING needed to flush the +OK to us. 2956 cs := fmt.Sprintf("CONNECT {\"nkey\":%q,\"sig\":\"%s\",\"verbose\":true,\"pedantic\":true}\r\nPING\r\n", pubKey, sig) 2957 c.parseAsync(cs) 2958 l, _ = cr.ReadString('\n') 2959 if !strings.HasPrefix(l, "+OK") { 2960 t.Fatalf("Expected an OK, got: %v", l) 2961 } 2962 if c.acc != synadia { 2963 t.Fatalf("Expected the nkey client's account to match 'synadia', got %v", c.acc) 2964 } 2965 2966 // Now nats account nkey user. 
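	// The raw CONNECT above mirrors what a client library does for nkey auth:
	// read the nonce from INFO, sign it with the seed, send the public key plus
	// signature. A minimal client-side sketch (not used by this test, shown only
	// for comparison) would be roughly:
	//
	//     kp, _ := nkeys.FromSeed(seed2)
	//     pub, _ := kp.PublicKey()
	//     nc, err := nats.Connect(url, nats.Nkey(pub, func(nonce []byte) ([]byte, error) {
	//         return kp.Sign(nonce)
	//     }))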
2967 kp, _ = nkeys.FromSeed(seed2) 2968 pubKey, _ = kp.PublicKey() 2969 2970 c, cr, l = newClientForServer(s) 2971 defer c.close() 2972 // Check for Nonce 2973 err = json.Unmarshal([]byte(l[5:]), &info) 2974 if err != nil { 2975 t.Fatalf("Could not parse INFO json: %v\n", err) 2976 } 2977 if info.Nonce == "" { 2978 t.Fatalf("Expected a non-empty nonce with nkeys defined") 2979 } 2980 sigraw, err = kp.Sign([]byte(info.Nonce)) 2981 if err != nil { 2982 t.Fatalf("Failed signing nonce: %v", err) 2983 } 2984 sig = base64.RawURLEncoding.EncodeToString(sigraw) 2985 2986 // PING needed to flush the +OK to us. 2987 cs = fmt.Sprintf("CONNECT {\"nkey\":%q,\"sig\":\"%s\",\"verbose\":true,\"pedantic\":true}\r\nPING\r\n", pubKey, sig) 2988 c.parseAsync(cs) 2989 l, _ = cr.ReadString('\n') 2990 if !strings.HasPrefix(l, "+OK") { 2991 t.Fatalf("Expected an OK, got: %v", l) 2992 } 2993 if c.acc != nats { 2994 t.Fatalf("Expected the nkey client's account to match 'nats', got %v", c.acc) 2995 } 2996 2997 // Remove user from account and whole account 2998 reloadUpdateConfig(t, s, conf, ` 2999 listen: "127.0.0.1:-1" 3000 authorization { 3001 users = [ 3002 # Ivan 3003 {nkey : UDPGQVFIWZ7Q5UH4I5E6DBCZULQS6VTVBG6CYBD7JV3G3N2GMQOMNAUH} 3004 ] 3005 } 3006 accounts { 3007 nats.io { 3008 users = [ 3009 # Derek 3010 {nkey : UCNGL4W5QX66CFX6A6DCBVDH5VOHMI7B2UZZU7TXAUQQSI2JPHULCKBR} 3011 ] 3012 } 3013 } 3014 `) 3015 3016 s.mu.Lock() 3017 nkeys := s.nkeys 3018 globalAcc := s.gacc 3019 s.mu.Unlock() 3020 3021 if n := len(nkeys); n != 2 { 3022 t.Fatalf("NKeys map should have 2 users, got %v", n) 3023 } 3024 derek := nkeys["UCNGL4W5QX66CFX6A6DCBVDH5VOHMI7B2UZZU7TXAUQQSI2JPHULCKBR"] 3025 if derek == nil { 3026 t.Fatal("NKey for user Derek not found") 3027 } 3028 if derek.Account == nil || derek.Account.Name != "nats.io" { 3029 t.Fatalf("Invalid account for user Derek: %#v", derek.Account) 3030 } 3031 ivan := nkeys["UDPGQVFIWZ7Q5UH4I5E6DBCZULQS6VTVBG6CYBD7JV3G3N2GMQOMNAUH"] 3032 if ivan == nil { 3033 t.Fatal("NKey for user Ivan not found") 3034 } 3035 if ivan.Account != globalAcc { 3036 t.Fatalf("Invalid account for user Ivan: %#v", ivan.Account) 3037 } 3038 if _, err := s.LookupAccount("synadia"); err == nil { 3039 t.Fatal("Account Synadia should have been removed") 3040 } 3041 } 3042 3043 func TestConfigReloadAccountStreamsImportExport(t *testing.T) { 3044 template := ` 3045 listen: "127.0.0.1:-1" 3046 accounts { 3047 synadia { 3048 users [{user: derek, password: foo}] 3049 exports = [ 3050 {stream: "private.>", accounts: [nats.io]} 3051 {stream: %s} 3052 ] 3053 } 3054 nats.io { 3055 users [ 3056 {user: ivan, password: bar, permissions: {subscribe: {deny: %s}}} 3057 ] 3058 imports = [ 3059 {stream: {account: "synadia", subject: %s}} 3060 {stream: {account: "synadia", subject: "private.natsio.*"}, prefix: %s} 3061 ] 3062 } 3063 } 3064 no_sys_acc: true 3065 ` 3066 // synadia account exports "private.>" to nats.io 3067 // synadia account exports "foo.*" 3068 // user ivan denies subscription on "xxx" 3069 // nats.io account imports "foo.*" from synadia 3070 // nats.io account imports "private.natsio.*" from synadia with prefix "ivan" 3071 conf := createConfFile(t, []byte(fmt.Sprintf(template, `"foo.*"`, `"xxx"`, `"foo.*"`, `"ivan"`))) 3072 s, opts := RunServerWithConfig(conf) 3073 defer s.Shutdown() 3074 3075 derek, err := nats.Connect(fmt.Sprintf("nats://derek:foo@%s:%d", opts.Host, opts.Port)) 3076 if err != nil { 3077 t.Fatalf("Error on connect: %v", err) 3078 } 3079 defer derek.Close() 3080 checkClientsCount(t, s, 1) 3081 
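	// Note on the prefixed import in the template above: nats.io imports
	// "private.natsio.*" from synadia with prefix "ivan", so on ivan's side that
	// stream is visible as "ivan.private.natsio.*" (exactly what subPriv
	// subscribes to below), while derek keeps publishing on the unprefixed subject.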
3082 ch := make(chan bool, 1) 3083 ivan, err := nats.Connect(fmt.Sprintf("nats://ivan:bar@%s:%d", opts.Host, opts.Port), 3084 nats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) { 3085 if strings.Contains(strings.ToLower(err.Error()), "permissions violation") { 3086 ch <- true 3087 } 3088 })) 3089 if err != nil { 3090 t.Fatalf("Error on connect: %v", err) 3091 } 3092 defer ivan.Close() 3093 checkClientsCount(t, s, 2) 3094 3095 subscribe := func(t *testing.T, nc *nats.Conn, subj string) *nats.Subscription { 3096 t.Helper() 3097 s, err := nc.SubscribeSync(subj) 3098 if err != nil { 3099 t.Fatalf("Error on subscribe: %v", err) 3100 } 3101 return s 3102 } 3103 3104 subFooBar := subscribe(t, ivan, "foo.bar") 3105 subFooBaz := subscribe(t, ivan, "foo.baz") 3106 subFooBat := subscribe(t, ivan, "foo.bat") 3107 subPriv := subscribe(t, ivan, "ivan.private.natsio.*") 3108 ivan.Flush() 3109 3110 publish := func(t *testing.T, nc *nats.Conn, subj string) { 3111 t.Helper() 3112 if err := nc.Publish(subj, []byte("hello")); err != nil { 3113 t.Fatalf("Error on publish: %v", err) 3114 } 3115 } 3116 3117 nextMsg := func(t *testing.T, sub *nats.Subscription, expected bool) { 3118 t.Helper() 3119 dur := 100 * time.Millisecond 3120 if expected { 3121 dur = time.Second 3122 } 3123 _, err := sub.NextMsg(dur) 3124 if expected && err != nil { 3125 t.Fatalf("Expected a message on %s, got %v", sub.Subject, err) 3126 } else if !expected && err != nats.ErrTimeout { 3127 t.Fatalf("Expected a timeout on %s, got %v", sub.Subject, err) 3128 } 3129 } 3130 3131 // Checks the derek's user sublist for presence of given subject 3132 // interest. Boolean says if interest is expected or not. 3133 checkSublist := func(t *testing.T, subject string, shouldBeThere bool) { 3134 t.Helper() 3135 dcli := s.getClient(1) 3136 dcli.mu.Lock() 3137 r := dcli.acc.sl.Match(subject) 3138 dcli.mu.Unlock() 3139 if shouldBeThere && len(r.psubs) != 1 { 3140 t.Fatalf("%s should have 1 match in derek's sublist, got %v", subject, len(r.psubs)) 3141 } else if !shouldBeThere && len(r.psubs) > 0 { 3142 t.Fatalf("%s should not be in derek's sublist", subject) 3143 } 3144 } 3145 3146 // Publish on all subjects and the subs should receive and 3147 // subjects should be in sublist 3148 publish(t, derek, "foo.bar") 3149 nextMsg(t, subFooBar, true) 3150 checkSublist(t, "foo.bar", true) 3151 3152 publish(t, derek, "foo.baz") 3153 nextMsg(t, subFooBaz, true) 3154 checkSublist(t, "foo.baz", true) 3155 3156 publish(t, derek, "foo.bat") 3157 nextMsg(t, subFooBat, true) 3158 checkSublist(t, "foo.bat", true) 3159 3160 publish(t, derek, "private.natsio.foo") 3161 nextMsg(t, subPriv, true) 3162 checkSublist(t, "private.natsio.foo", true) 3163 3164 // Also make sure that intra-account subscription works OK 3165 ivanSub := subscribe(t, ivan, "ivan.sub") 3166 publish(t, ivan, "ivan.sub") 3167 nextMsg(t, ivanSub, true) 3168 derekSub := subscribe(t, derek, "derek.sub") 3169 publish(t, derek, "derek.sub") 3170 nextMsg(t, derekSub, true) 3171 3172 // synadia account exports "private.>" to nats.io 3173 // synadia account exports "foo.*" 3174 // user ivan denies subscription on "foo.bat" 3175 // nats.io account imports "foo.baz" from synadia 3176 // nats.io account imports "private.natsio.*" from synadia with prefix "yyyy" 3177 reloadUpdateConfig(t, s, conf, fmt.Sprintf(template, `"foo.*"`, `"foo.bat"`, `"foo.baz"`, `"yyyy"`)) 3178 3179 // Sub on foo.bar should now fail to receive 3180 publish(t, derek, "foo.bar") 3181 nextMsg(t, subFooBar, false) 3182 
checkSublist(t, "foo.bar", false) 3183 // But foo.baz should be received 3184 publish(t, derek, "foo.baz") 3185 nextMsg(t, subFooBaz, true) 3186 checkSublist(t, "foo.baz", true) 3187 // Due to permissions, foo.bat should not 3188 publish(t, derek, "foo.bat") 3189 nextMsg(t, subFooBat, false) 3190 checkSublist(t, "foo.bat", false) 3191 // Prefix changed, so should not be received 3192 publish(t, derek, "private.natsio.foo") 3193 nextMsg(t, subPriv, false) 3194 checkSublist(t, "private.natsio.foo", false) 3195 3196 // Wait for client notification of permissions error 3197 if err := wait(ch); err != nil { 3198 t.Fatal("Did not the permissions error") 3199 } 3200 3201 publish(t, ivan, "ivan.sub") 3202 nextMsg(t, ivanSub, true) 3203 publish(t, derek, "derek.sub") 3204 nextMsg(t, derekSub, true) 3205 3206 // Change export so that foo.* is no longer exported 3207 // synadia account exports "private.>" to nats.io 3208 // synadia account exports "xxx" 3209 // user ivan denies subscription on "foo.bat" 3210 // nats.io account imports "xxx" from synadia 3211 // nats.io account imports "private.natsio.*" from synadia with prefix "ivan" 3212 reloadUpdateConfig(t, s, conf, fmt.Sprintf(template, `"xxx"`, `"foo.bat"`, `"xxx"`, `"ivan"`)) 3213 3214 publish(t, derek, "foo.bar") 3215 nextMsg(t, subFooBar, false) 3216 checkSublist(t, "foo.bar", false) 3217 3218 publish(t, derek, "foo.baz") 3219 nextMsg(t, subFooBaz, false) 3220 checkSublist(t, "foo.baz", false) 3221 3222 publish(t, derek, "foo.bat") 3223 nextMsg(t, subFooBat, false) 3224 checkSublist(t, "foo.bat", false) 3225 3226 // Prefix changed back, so should receive 3227 publish(t, derek, "private.natsio.foo") 3228 nextMsg(t, subPriv, true) 3229 checkSublist(t, "private.natsio.foo", true) 3230 3231 publish(t, ivan, "ivan.sub") 3232 nextMsg(t, ivanSub, true) 3233 publish(t, derek, "derek.sub") 3234 nextMsg(t, derekSub, true) 3235 } 3236 3237 func TestConfigReloadAccountServicesImportExport(t *testing.T) { 3238 conf := createConfFile(t, []byte(` 3239 listen: "127.0.0.1:-1" 3240 accounts { 3241 synadia { 3242 users [{user: derek, password: foo}] 3243 exports = [ 3244 {service: "pub.request"} 3245 {service: "pub.special.request", accounts: [nats.io]} 3246 ] 3247 } 3248 nats.io { 3249 users [{user: ivan, password: bar}] 3250 imports = [ 3251 {service: {account: "synadia", subject: "pub.special.request"}, to: "foo"} 3252 {service: {account: "synadia", subject: "pub.request"}, to: "bar"} 3253 ] 3254 } 3255 } 3256 cluster { 3257 name: "abc" 3258 port: -1 3259 } 3260 `)) 3261 s, opts := RunServerWithConfig(conf) 3262 defer s.Shutdown() 3263 3264 opts2 := DefaultOptions() 3265 opts2.Cluster.Name = "abc" 3266 opts2.Routes = RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", opts.Cluster.Port)) 3267 s2 := RunServer(opts2) 3268 defer s2.Shutdown() 3269 3270 checkClusterFormed(t, s, s2) 3271 3272 derek, err := nats.Connect(fmt.Sprintf("nats://derek:foo@%s:%d", opts.Host, opts.Port)) 3273 if err != nil { 3274 t.Fatalf("Error on connect: %v", err) 3275 } 3276 defer derek.Close() 3277 checkClientsCount(t, s, 1) 3278 3279 ivan, err := nats.Connect(fmt.Sprintf("nats://ivan:bar@%s:%d", opts.Host, opts.Port)) 3280 if err != nil { 3281 t.Fatalf("Error on connect: %v", err) 3282 } 3283 defer ivan.Close() 3284 checkClientsCount(t, s, 2) 3285 3286 if _, err := derek.Subscribe("pub.special.request", func(m *nats.Msg) { 3287 derek.Publish(m.Reply, []byte("reply1")) 3288 }); err != nil { 3289 t.Fatalf("Error on subscribe: %v", err) 3290 } 3291 if _, err := 
derek.Subscribe("pub.request", func(m *nats.Msg) { 3292 derek.Publish(m.Reply, []byte("reply2")) 3293 }); err != nil { 3294 t.Fatalf("Error on subscribe: %v", err) 3295 } 3296 if _, err := derek.Subscribe("pub.special.request.new", func(m *nats.Msg) { 3297 derek.Publish(m.Reply, []byte("reply3")) 3298 }); err != nil { 3299 t.Fatalf("Error on subscribe: %v", err) 3300 } 3301 // Also create one that will be used for intra-account communication 3302 if _, err := derek.Subscribe("derek.sub", func(m *nats.Msg) { 3303 derek.Publish(m.Reply, []byte("private")) 3304 }); err != nil { 3305 t.Fatalf("Error on subscribe: %v", err) 3306 } 3307 derek.Flush() 3308 3309 // Create an intra-account sub for ivan too 3310 if _, err := ivan.Subscribe("ivan.sub", func(m *nats.Msg) { 3311 ivan.Publish(m.Reply, []byte("private")) 3312 }); err != nil { 3313 t.Fatalf("Error on subscribe: %v", err) 3314 } 3315 // This subscription is just to make sure that we can update 3316 // route map without locking issues during reload. 3317 natsSubSync(t, ivan, "bar") 3318 3319 req := func(t *testing.T, nc *nats.Conn, subj string, reply string) { 3320 t.Helper() 3321 var timeout time.Duration 3322 if reply != "" { 3323 timeout = time.Second 3324 } else { 3325 timeout = 100 * time.Millisecond 3326 } 3327 msg, err := nc.Request(subj, []byte("request"), timeout) 3328 if reply != "" { 3329 if err != nil { 3330 t.Fatalf("Expected reply %s on subject %s, got %v", reply, subj, err) 3331 } 3332 if string(msg.Data) != reply { 3333 t.Fatalf("Expected reply %s on subject %s, got %s", reply, subj, msg.Data) 3334 } 3335 } else if err != nats.ErrTimeout && err != nats.ErrNoResponders { 3336 t.Fatalf("Expected timeout on subject %s, got %v", subj, err) 3337 } 3338 } 3339 3340 req(t, ivan, "foo", "reply1") 3341 req(t, ivan, "bar", "reply2") 3342 // This not exported/imported, so should timeout 3343 req(t, ivan, "baz", "") 3344 3345 // Check intra-account communication 3346 req(t, ivan, "ivan.sub", "private") 3347 req(t, derek, "derek.sub", "private") 3348 3349 reloadUpdateConfig(t, s, conf, ` 3350 listen: "127.0.0.1:-1" 3351 accounts { 3352 synadia { 3353 users [{user: derek, password: foo}] 3354 exports = [ 3355 {service: "pub.request"} 3356 {service: "pub.special.request", accounts: [nats.io]} 3357 {service: "pub.special.request.new", accounts: [nats.io]} 3358 ] 3359 } 3360 nats.io { 3361 users [{user: ivan, password: bar}] 3362 imports = [ 3363 {service: {account: "synadia", subject: "pub.special.request"}, to: "foo"} 3364 {service: {account: "synadia", subject: "pub.special.request.new"}, to: "baz"} 3365 ] 3366 } 3367 } 3368 cluster { 3369 name: "abc" 3370 port: -1 3371 } 3372 `) 3373 // This still should work 3374 req(t, ivan, "foo", "reply1") 3375 // This should not 3376 req(t, ivan, "bar", "") 3377 // This now should work 3378 req(t, ivan, "baz", "reply3") 3379 3380 // Check intra-account communication 3381 req(t, ivan, "ivan.sub", "private") 3382 req(t, derek, "derek.sub", "private") 3383 } 3384 3385 // As of now, config reload does not support changes for gateways. 3386 // However, ensure that if a gateway is defined, one can still 3387 // do reload as long as we don't change the gateway spec. 
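// In the test below, the only difference between the reload that is accepted and
// the one that is rejected is the gateway tls timeout placeholder (5 vs 3):
// touching anything inside the gateway block should fail with a
// "not supported for Gateway" error, while adding max_payload outside of it is fine.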
3388 func TestConfigReloadNotPreventedByGateways(t *testing.T) { 3389 confTemplate := ` 3390 listen: "127.0.0.1:-1" 3391 %s 3392 gateway { 3393 name: "A" 3394 listen: "127.0.0.1:-1" 3395 tls { 3396 cert_file: "configs/certs/server.pem" 3397 key_file: "configs/certs/key.pem" 3398 timeout: %s 3399 } 3400 gateways [ 3401 { 3402 name: "B" 3403 url: "nats://localhost:8888" 3404 } 3405 ] 3406 } 3407 no_sys_acc: true 3408 ` 3409 conf := createConfFile(t, []byte(fmt.Sprintf(confTemplate, "", "5"))) 3410 s, _ := RunServerWithConfig(conf) 3411 defer s.Shutdown() 3412 3413 // Cause reload with adding a param that is supported 3414 reloadUpdateConfig(t, s, conf, fmt.Sprintf(confTemplate, "max_payload: 100000", "5")) 3415 3416 // Now update gateway, should fail to reload. 3417 changeCurrentConfigContentWithNewContent(t, conf, []byte(fmt.Sprintf(confTemplate, "max_payload: 100000", "3"))) 3418 if err := s.Reload(); err == nil || !strings.Contains(err.Error(), "not supported for Gateway") { 3419 t.Fatalf("Expected Reload to return a not supported error, got %v", err) 3420 } 3421 } 3422 3423 func TestConfigReloadBoolFlags(t *testing.T) { 3424 defer func() { FlagSnapshot = nil }() 3425 3426 logfile := filepath.Join(t.TempDir(), "logtime.log") 3427 template := ` 3428 listen: "127.0.0.1:-1" 3429 logfile: "%s" 3430 %s 3431 ` 3432 3433 var opts *Options 3434 var err error 3435 3436 for _, test := range []struct { 3437 name string 3438 content string 3439 cmdLine []string 3440 expected bool 3441 val func() bool 3442 }{ 3443 // Logtime 3444 { 3445 "logtime_not_in_config_no_override", 3446 "", 3447 nil, 3448 true, 3449 func() bool { return opts.Logtime }, 3450 }, 3451 { 3452 "logtime_not_in_config_override_short_true", 3453 "", 3454 []string{"-T"}, 3455 true, 3456 func() bool { return opts.Logtime }, 3457 }, 3458 { 3459 "logtime_not_in_config_override_true", 3460 "", 3461 []string{"-logtime"}, 3462 true, 3463 func() bool { return opts.Logtime }, 3464 }, 3465 { 3466 "logtime_false_in_config_no_override", 3467 "logtime: false", 3468 nil, 3469 false, 3470 func() bool { return opts.Logtime }, 3471 }, 3472 { 3473 "logtime_false_in_config_override_short_true", 3474 "logtime: false", 3475 []string{"-T"}, 3476 true, 3477 func() bool { return opts.Logtime }, 3478 }, 3479 { 3480 "logtime_false_in_config_override_true", 3481 "logtime: false", 3482 []string{"-logtime"}, 3483 true, 3484 func() bool { return opts.Logtime }, 3485 }, 3486 { 3487 "logtime_true_in_config_no_override", 3488 "logtime: true", 3489 nil, 3490 true, 3491 func() bool { return opts.Logtime }, 3492 }, 3493 { 3494 "logtime_true_in_config_override_short_false", 3495 "logtime: true", 3496 []string{"-T=false"}, 3497 false, 3498 func() bool { return opts.Logtime }, 3499 }, 3500 { 3501 "logtime_true_in_config_override_false", 3502 "logtime: true", 3503 []string{"-logtime=false"}, 3504 false, 3505 func() bool { return opts.Logtime }, 3506 }, 3507 // Debug 3508 { 3509 "debug_not_in_config_no_override", 3510 "", 3511 nil, 3512 false, 3513 func() bool { return opts.Debug }, 3514 }, 3515 { 3516 "debug_not_in_config_override_short_true", 3517 "", 3518 []string{"-D"}, 3519 true, 3520 func() bool { return opts.Debug }, 3521 }, 3522 { 3523 "debug_not_in_config_override_true", 3524 "", 3525 []string{"-debug"}, 3526 true, 3527 func() bool { return opts.Debug }, 3528 }, 3529 { 3530 "debug_false_in_config_no_override", 3531 "debug: false", 3532 nil, 3533 false, 3534 func() bool { return opts.Debug }, 3535 }, 3536 { 3537 "debug_false_in_config_override_short_true", 3538 
"debug: false", 3539 []string{"-D"}, 3540 true, 3541 func() bool { return opts.Debug }, 3542 }, 3543 { 3544 "debug_false_in_config_override_true", 3545 "debug: false", 3546 []string{"-debug"}, 3547 true, 3548 func() bool { return opts.Debug }, 3549 }, 3550 { 3551 "debug_true_in_config_no_override", 3552 "debug: true", 3553 nil, 3554 true, 3555 func() bool { return opts.Debug }, 3556 }, 3557 { 3558 "debug_true_in_config_override_short_false", 3559 "debug: true", 3560 []string{"-D=false"}, 3561 false, 3562 func() bool { return opts.Debug }, 3563 }, 3564 { 3565 "debug_true_in_config_override_false", 3566 "debug: true", 3567 []string{"-debug=false"}, 3568 false, 3569 func() bool { return opts.Debug }, 3570 }, 3571 // Trace 3572 { 3573 "trace_not_in_config_no_override", 3574 "", 3575 nil, 3576 false, 3577 func() bool { return opts.Trace }, 3578 }, 3579 { 3580 "trace_not_in_config_override_short_true", 3581 "", 3582 []string{"-V"}, 3583 true, 3584 func() bool { return opts.Trace }, 3585 }, 3586 { 3587 "trace_not_in_config_override_true", 3588 "", 3589 []string{"-trace"}, 3590 true, 3591 func() bool { return opts.Trace }, 3592 }, 3593 { 3594 "trace_false_in_config_no_override", 3595 "trace: false", 3596 nil, 3597 false, 3598 func() bool { return opts.Trace }, 3599 }, 3600 { 3601 "trace_false_in_config_override_short_true", 3602 "trace: false", 3603 []string{"-V"}, 3604 true, 3605 func() bool { return opts.Trace }, 3606 }, 3607 { 3608 "trace_false_in_config_override_true", 3609 "trace: false", 3610 []string{"-trace"}, 3611 true, 3612 func() bool { return opts.Trace }, 3613 }, 3614 { 3615 "trace_true_in_config_no_override", 3616 "trace: true", 3617 nil, 3618 true, 3619 func() bool { return opts.Trace }, 3620 }, 3621 { 3622 "trace_true_in_config_override_short_false", 3623 "trace: true", 3624 []string{"-V=false"}, 3625 false, 3626 func() bool { return opts.Trace }, 3627 }, 3628 { 3629 "trace_true_in_config_override_false", 3630 "trace: true", 3631 []string{"-trace=false"}, 3632 false, 3633 func() bool { return opts.Trace }, 3634 }, 3635 // Syslog 3636 { 3637 "syslog_not_in_config_no_override", 3638 "", 3639 nil, 3640 false, 3641 func() bool { return opts.Syslog }, 3642 }, 3643 { 3644 "syslog_not_in_config_override_short_true", 3645 "", 3646 []string{"-s"}, 3647 true, 3648 func() bool { return opts.Syslog }, 3649 }, 3650 { 3651 "syslog_not_in_config_override_true", 3652 "", 3653 []string{"-syslog"}, 3654 true, 3655 func() bool { return opts.Syslog }, 3656 }, 3657 { 3658 "syslog_false_in_config_no_override", 3659 "syslog: false", 3660 nil, 3661 false, 3662 func() bool { return opts.Syslog }, 3663 }, 3664 { 3665 "syslog_false_in_config_override_short_true", 3666 "syslog: false", 3667 []string{"-s"}, 3668 true, 3669 func() bool { return opts.Syslog }, 3670 }, 3671 { 3672 "syslog_false_in_config_override_true", 3673 "syslog: false", 3674 []string{"-syslog"}, 3675 true, 3676 func() bool { return opts.Syslog }, 3677 }, 3678 { 3679 "syslog_true_in_config_no_override", 3680 "syslog: true", 3681 nil, 3682 true, 3683 func() bool { return opts.Syslog }, 3684 }, 3685 { 3686 "syslog_true_in_config_override_short_false", 3687 "syslog: true", 3688 []string{"-s=false"}, 3689 false, 3690 func() bool { return opts.Syslog }, 3691 }, 3692 { 3693 "syslog_true_in_config_override_false", 3694 "syslog: true", 3695 []string{"-syslog=false"}, 3696 false, 3697 func() bool { return opts.Syslog }, 3698 }, 3699 // Cluster.NoAdvertise 3700 { 3701 "cluster_no_advertise_not_in_config_no_override", 3702 `cluster { 3703 port: -1 3704 
}`,
3705 nil,
3706 false,
3707 func() bool { return opts.Cluster.NoAdvertise },
3708 },
3709 {
3710 "cluster_no_advertise_not_in_config_override_true",
3711 `cluster {
3712 port: -1
3713 }`,
3714 []string{"-no_advertise"},
3715 true,
3716 func() bool { return opts.Cluster.NoAdvertise },
3717 },
3718 {
3719 "cluster_no_advertise_false_in_config_no_override",
3720 `cluster {
3721 port: -1
3722 no_advertise: false
3723 }`,
3724 nil,
3725 false,
3726 func() bool { return opts.Cluster.NoAdvertise },
3727 },
3728 {
3729 "cluster_no_advertise_false_in_config_override_true",
3730 `cluster {
3731 port: -1
3732 no_advertise: false
3733 }`,
3734 []string{"-no_advertise"},
3735 true,
3736 func() bool { return opts.Cluster.NoAdvertise },
3737 },
3738 {
3739 "cluster_no_advertise_true_in_config_no_override",
3740 `cluster {
3741 port: -1
3742 no_advertise: true
3743 }`,
3744 nil,
3745 true,
3746 func() bool { return opts.Cluster.NoAdvertise },
3747 },
3748 {
3749 "cluster_no_advertise_true_in_config_override_false",
3750 `cluster {
3751 port: -1
3752 no_advertise: true
3753 }`,
3754 []string{"-no_advertise=false"},
3755 false,
3756 func() bool { return opts.Cluster.NoAdvertise },
3757 },
3758 // -DV override
3759 {
3760 "debug_trace_not_in_config_dv_override_true",
3761 "",
3762 []string{"-DV"},
3763 true,
3764 func() bool { return opts.Debug && opts.Trace },
3765 },
3766 {
3767 "debug_trace_false_in_config_dv_override_true",
3768 `debug: false
3769 trace: false
3770 `,
3771 []string{"-DV"},
3772 true,
3773 func() bool { return opts.Debug && opts.Trace },
3774 },
3775 {
3776 "debug_trace_true_in_config_dv_override_false",
3777 `debug: true
3778 trace: true
3779 `,
3780 []string{"-DV=false"},
3781 false,
3782 func() bool { return opts.Debug && opts.Trace },
3783 },
3784 {
3785 "trace_verbose_true_in_config_no_override",
3786 `trace_verbose: true
3787 `,
3788 nil,
3789 true,
3790 func() bool { return opts.Trace && opts.TraceVerbose },
3791 },
3792 {
3793 "trace_verbose_true_in_config_override_false",
3794 `trace_verbose: true
3795 `,
3796 []string{"--VV=false"},
3797 true,
3798 func() bool { return !opts.TraceVerbose },
3799 },
3800 {
3801 "trace_verbose_false_in_config_override_true",
3802 `trace_verbose: false
3803 `,
3804 []string{"--VV=true"},
3805 true,
3806 func() bool { return opts.TraceVerbose },
3807 },
3808 } {
3809 t.Run(test.name, func(t *testing.T) {
3810 conf := createConfFile(t, []byte(fmt.Sprintf(template, logfile, test.content)))
3811 
3812 fs := flag.NewFlagSet("test", flag.ContinueOnError)
3813 var args []string
3814 args = append(args, "-c", conf)
3815 if test.cmdLine != nil {
3816 args = append(args, test.cmdLine...)
3817 }
3818 opts, err = ConfigureOptions(fs, args, nil, nil, nil)
3819 if err != nil {
3820 t.Fatalf("Error processing config: %v", err)
3821 }
3822 opts.NoSigs = true
3823 s := RunServer(opts)
3824 defer s.Shutdown()
3825 
3826 if test.val() != test.expected {
3827 t.Fatalf("Expected to be set to %v, got %v", test.expected, test.val())
3828 }
3829 if err := s.Reload(); err != nil {
3830 t.Fatalf("Error on reload: %v", err)
3831 }
3832 if test.val() != test.expected {
3833 t.Fatalf("Expected to be set to %v, got %v", test.expected, test.val())
3834 }
3835 })
3836 }
3837 }
3838 
3839 func TestConfigReloadMaxControlLineWithClients(t *testing.T) {
3840 server, opts, config := runReloadServerWithConfig(t, "./configs/reload/basic.conf")
3841 defer server.Shutdown()
3842 
3843 // Ensure we can connect as a sanity check.
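// The connection is kept open across the reload below so we can verify that the
// new max_control_line value is pushed to clients that are already connected.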
3844 addr := fmt.Sprintf("nats://%s:%d", opts.Host, server.Addr().(*net.TCPAddr).Port) 3845 nc, err := nats.Connect(addr) 3846 if err != nil { 3847 t.Fatalf("Error creating client: %v", err) 3848 } 3849 defer nc.Close() 3850 3851 // Now grab server's internal client that matches. 3852 cid, _ := nc.GetClientID() 3853 c := server.getClient(cid) 3854 if c == nil { 3855 t.Fatalf("Could not look up internal client") 3856 } 3857 3858 // Check that we have the correct mcl snapshotted into the connected client. 3859 getMcl := func(c *client) int32 { 3860 c.mu.Lock() 3861 defer c.mu.Unlock() 3862 return c.mcl 3863 } 3864 if mcl := getMcl(c); mcl != opts.MaxControlLine { 3865 t.Fatalf("Expected snapshot in client for mcl to be same as opts.MaxControlLine, got %d vs %d", 3866 mcl, opts.MaxControlLine) 3867 } 3868 3869 changeCurrentConfigContentWithNewContent(t, config, []byte("listen: 127.0.0.1:-1; max_control_line: 222")) 3870 if err := server.Reload(); err != nil { 3871 t.Fatalf("Expected Reload to succeed, got %v", err) 3872 } 3873 3874 // Refresh properly. 3875 opts = server.getOpts() 3876 3877 if mcl := getMcl(c); mcl != opts.MaxControlLine { 3878 t.Fatalf("Expected snapshot in client for mcl to be same as new opts.MaxControlLine, got %d vs %d", 3879 mcl, opts.MaxControlLine) 3880 } 3881 } 3882 3883 type testCustomAuth struct{} 3884 3885 func (ca *testCustomAuth) Check(c ClientAuthentication) bool { return true } 3886 3887 func TestConfigReloadIgnoreCustomAuth(t *testing.T) { 3888 conf := createConfFile(t, []byte(` 3889 port: -1 3890 `)) 3891 opts := LoadConfig(conf) 3892 3893 ca := &testCustomAuth{} 3894 opts.CustomClientAuthentication = ca 3895 opts.CustomRouterAuthentication = ca 3896 3897 s := RunServer(opts) 3898 defer s.Shutdown() 3899 3900 if err := s.Reload(); err != nil { 3901 t.Fatalf("Error during reload: %v", err) 3902 } 3903 3904 if s.getOpts().CustomClientAuthentication != ca || s.getOpts().CustomRouterAuthentication != ca { 3905 t.Fatalf("Custom auth missing") 3906 } 3907 } 3908 3909 func TestConfigReloadLeafNodeRandomPort(t *testing.T) { 3910 conf := createConfFile(t, []byte(` 3911 port: -1 3912 leafnodes { 3913 port: -1 3914 } 3915 `)) 3916 s, _ := RunServerWithConfig(conf) 3917 defer s.Shutdown() 3918 3919 s.mu.Lock() 3920 lnPortBefore := s.leafNodeListener.Addr().(*net.TCPAddr).Port 3921 s.mu.Unlock() 3922 3923 if err := s.Reload(); err != nil { 3924 t.Fatalf("Error during reload: %v", err) 3925 } 3926 3927 s.mu.Lock() 3928 lnPortAfter := s.leafNodeListener.Addr().(*net.TCPAddr).Port 3929 s.mu.Unlock() 3930 3931 if lnPortBefore != lnPortAfter { 3932 t.Fatalf("Expected leafnodes listen port to be same, was %v is now %v", lnPortBefore, lnPortAfter) 3933 } 3934 } 3935 3936 func TestConfigReloadLeafNodeWithTLS(t *testing.T) { 3937 template := ` 3938 port: -1 3939 %s 3940 leaf { 3941 listen: "127.0.0.1:-1" 3942 tls: { 3943 ca_file: "../test/configs/certs/tlsauth/ca.pem" 3944 cert_file: "../test/configs/certs/tlsauth/server.pem" 3945 key_file: "../test/configs/certs/tlsauth/server-key.pem" 3946 timeout: 3 3947 } 3948 } 3949 ` 3950 conf1 := createConfFile(t, []byte(fmt.Sprintf(template, ""))) 3951 s1, o1 := RunServerWithConfig(conf1) 3952 defer s1.Shutdown() 3953 3954 u, err := url.Parse(fmt.Sprintf("nats://localhost:%d", o1.LeafNode.Port)) 3955 if err != nil { 3956 t.Fatalf("Error creating url: %v", err) 3957 } 3958 conf2 := createConfFile(t, []byte(fmt.Sprintf(` 3959 port: -1 3960 leaf { 3961 remotes [ 3962 { 3963 url: "%s" 3964 tls { 3965 ca_file: 
"../test/configs/certs/tlsauth/ca.pem" 3966 cert_file: "../test/configs/certs/tlsauth/client.pem" 3967 key_file: "../test/configs/certs/tlsauth/client-key.pem" 3968 timeout: 2 3969 } 3970 } 3971 ] 3972 } 3973 `, u.String()))) 3974 o2, err := ProcessConfigFile(conf2) 3975 if err != nil { 3976 t.Fatalf("Error processing config file: %v", err) 3977 } 3978 o2.NoLog, o2.NoSigs = true, true 3979 o2.LeafNode.resolver = &testLoopbackResolver{} 3980 s2 := RunServer(o2) 3981 defer s2.Shutdown() 3982 3983 checkFor(t, 3*time.Second, 15*time.Millisecond, func() error { 3984 if n := s1.NumLeafNodes(); n != 1 { 3985 return fmt.Errorf("Expected 1 leaf node, got %v", n) 3986 } 3987 return nil 3988 }) 3989 3990 changeCurrentConfigContentWithNewContent(t, conf1, []byte(fmt.Sprintf(template, "debug: false"))) 3991 3992 if err := s1.Reload(); err != nil { 3993 t.Fatalf("Error during reload: %v", err) 3994 } 3995 } 3996 3997 func TestConfigReloadLeafNodeWithRemotesNoChanges(t *testing.T) { 3998 template := ` 3999 port: -1 4000 cluster { 4001 port: -1 4002 name: "%s" 4003 } 4004 leaf { 4005 remotes [ 4006 { 4007 urls: [ 4008 "nats://127.0.0.1:1234", 4009 "nats://127.0.0.1:1235", 4010 "nats://127.0.0.1:1236", 4011 "nats://127.0.0.1:1237", 4012 "nats://127.0.0.1:1238", 4013 "nats://127.0.0.1:1239", 4014 ] 4015 } 4016 ] 4017 } 4018 ` 4019 config := fmt.Sprintf(template, "A") 4020 conf := createConfFile(t, []byte(config)) 4021 o, err := ProcessConfigFile(conf) 4022 if err != nil { 4023 t.Fatalf("Error processing config file: %v", err) 4024 } 4025 o.NoLog, o.NoSigs = true, false 4026 s := RunServer(o) 4027 defer s.Shutdown() 4028 4029 config = fmt.Sprintf(template, "B") 4030 changeCurrentConfigContentWithNewContent(t, conf, []byte(config)) 4031 4032 if err := s.Reload(); err != nil { 4033 t.Fatalf("Error during reload: %v", err) 4034 } 4035 } 4036 4037 func TestConfigReloadAndVarz(t *testing.T) { 4038 template := ` 4039 port: -1 4040 %s 4041 ` 4042 conf := createConfFile(t, []byte(fmt.Sprintf(template, ""))) 4043 s, _ := RunServerWithConfig(conf) 4044 defer s.Shutdown() 4045 4046 s.mu.Lock() 4047 initConfigTime := s.configTime 4048 s.mu.Unlock() 4049 4050 v, _ := s.Varz(nil) 4051 if !v.ConfigLoadTime.Equal(initConfigTime) { 4052 t.Fatalf("ConfigLoadTime should be %v, got %v", initConfigTime, v.ConfigLoadTime) 4053 } 4054 if v.MaxConn != DEFAULT_MAX_CONNECTIONS { 4055 t.Fatalf("MaxConn should be %v, got %v", DEFAULT_MAX_CONNECTIONS, v.MaxConn) 4056 } 4057 4058 changeCurrentConfigContentWithNewContent(t, conf, []byte(fmt.Sprintf(template, "max_connections: 10"))) 4059 4060 // Make sure we wait a bit so config load time has a chance to change. 
4061 time.Sleep(15 * time.Millisecond) 4062 4063 if err := s.Reload(); err != nil { 4064 t.Fatalf("Error during reload: %v", err) 4065 } 4066 4067 v, _ = s.Varz(nil) 4068 if v.ConfigLoadTime.Equal(initConfigTime) { 4069 t.Fatalf("ConfigLoadTime should be different from %v", initConfigTime) 4070 } 4071 if v.MaxConn != 10 { 4072 t.Fatalf("MaxConn should be 10, got %v", v.MaxConn) 4073 } 4074 } 4075 4076 func TestConfigReloadConnectErrReports(t *testing.T) { 4077 template := ` 4078 port: -1 4079 %s 4080 %s 4081 ` 4082 conf := createConfFile(t, []byte(fmt.Sprintf(template, "", ""))) 4083 s, _ := RunServerWithConfig(conf) 4084 defer s.Shutdown() 4085 4086 opts := s.getOpts() 4087 if cer := opts.ConnectErrorReports; cer != DEFAULT_CONNECT_ERROR_REPORTS { 4088 t.Fatalf("Expected ConnectErrorReports to be %v, got %v", DEFAULT_CONNECT_ERROR_REPORTS, cer) 4089 } 4090 if rer := opts.ReconnectErrorReports; rer != DEFAULT_RECONNECT_ERROR_REPORTS { 4091 t.Fatalf("Expected ReconnectErrorReports to be %v, got %v", DEFAULT_RECONNECT_ERROR_REPORTS, rer) 4092 } 4093 4094 changeCurrentConfigContentWithNewContent(t, conf, 4095 []byte(fmt.Sprintf(template, "connect_error_reports: 2", "reconnect_error_reports: 3"))) 4096 4097 if err := s.Reload(); err != nil { 4098 t.Fatalf("Error during reload: %v", err) 4099 } 4100 4101 opts = s.getOpts() 4102 if cer := opts.ConnectErrorReports; cer != 2 { 4103 t.Fatalf("Expected ConnectErrorReports to be %v, got %v", 2, cer) 4104 } 4105 if rer := opts.ReconnectErrorReports; rer != 3 { 4106 t.Fatalf("Expected ReconnectErrorReports to be %v, got %v", 3, rer) 4107 } 4108 } 4109 4110 func TestConfigReloadAuthDoesNotBreakRouteInterest(t *testing.T) { 4111 s, opts := RunServerWithConfig("./configs/seed_tls.conf") 4112 defer s.Shutdown() 4113 4114 // Create client and sub interest on seed server. 4115 urlSeed := fmt.Sprintf("nats://%s:%d/", opts.Host, opts.Port) 4116 nc, err := nats.Connect(urlSeed) 4117 if err != nil { 4118 t.Fatalf("Error creating client: %v\n", err) 4119 } 4120 defer nc.Close() 4121 4122 ch := make(chan bool) 4123 nc.Subscribe("foo", func(m *nats.Msg) { ch <- true }) 4124 nc.Flush() 4125 4126 // Use this to check for message. 4127 checkForMsg := func() { 4128 t.Helper() 4129 select { 4130 case <-ch: 4131 case <-time.After(2 * time.Second): 4132 t.Fatal("Timeout waiting for message across route") 4133 } 4134 } 4135 4136 // Create second server and form cluster. We will send from here. 4137 urlRoute := fmt.Sprintf("nats://%s:%d", opts.Cluster.Host, opts.Cluster.Port) 4138 optsA := nextServerOpts(opts) 4139 optsA.Routes = RoutesFromStr(urlRoute) 4140 4141 sa := RunServer(optsA) 4142 defer sa.Shutdown() 4143 4144 checkClusterFormed(t, s, sa) 4145 checkSubInterest(t, sa, globalAccountName, "foo", time.Second) 4146 4147 // Create second client and send message from this one. Interest should be here. 4148 urlA := fmt.Sprintf("nats://%s:%d/", optsA.Host, optsA.Port) 4149 nc2, err := nats.Connect(urlA) 4150 if err != nil { 4151 t.Fatalf("Error creating client: %v\n", err) 4152 } 4153 defer nc2.Close() 4154 4155 // Check that we can send messages. 4156 nc2.Publish("foo", nil) 4157 checkForMsg() 4158 4159 // Now shutdown nc2 and srvA. 4160 nc2.Close() 4161 sa.Shutdown() 4162 4163 // Now force reload on seed server of auth. 4164 s.reloadAuthorization() 4165 4166 // Restart both server A and client 2. 
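// After the forced auth reload on the seed server, the route must still be accepted
// and the interest on "foo" must be propagated again.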
4167 sa = RunServer(optsA)
4168 defer sa.Shutdown()
4169 
4170 checkClusterFormed(t, s, sa)
4171 checkSubInterest(t, sa, globalAccountName, "foo", time.Second)
4172 
4173 nc2, err = nats.Connect(urlA)
4174 if err != nil {
4175 t.Fatalf("Error creating client: %v\n", err)
4176 }
4177 defer nc2.Close()
4178 
4179 // Check that we can still send messages.
4180 nc2.Publish("foo", nil)
4181 checkForMsg()
4182 }
4183 
4184 func TestConfigReloadAccountResolverTLSConfig(t *testing.T) {
4185 kp, _ := nkeys.FromSeed(oSeed)
4186 akp, _ := nkeys.CreateAccount()
4187 apub, _ := akp.PublicKey()
4188 nac := jwt.NewAccountClaims(apub)
4189 ajwt, err := nac.Encode(kp)
4190 if err != nil {
4191 t.Fatalf("Error generating account JWT: %v", err)
4192 }
4193 pub, _ := kp.PublicKey()
4194 
4195 tc := &TLSConfigOpts{
4196 CertFile: "../test/configs/certs/server-cert.pem",
4197 KeyFile: "../test/configs/certs/server-key.pem",
4198 CaFile: "../test/configs/certs/ca.pem",
4199 }
4200 tlsConfig, err := GenTLSConfig(tc)
4201 if err != nil {
4202 t.Fatalf("Error generating tls config: %v", err)
4203 }
4204 ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
4205 w.Write([]byte(ajwt))
4206 }))
4207 ts.TLS = tlsConfig
4208 ts.StartTLS()
4209 defer ts.Close()
4210 // Set a dummy logger to prevent tls bad certificate output to stderr.
4211 ts.Config.ErrorLog = log.New(&bytes.Buffer{}, "", 0)
4212 
4213 confTemplate := `
4214 listen: -1
4215 trusted_keys: %s
4216 resolver: URL("%s/ngs/v1/accounts/jwt/")
4217 %s
4218 `
4219 conf := createConfFile(t, []byte(fmt.Sprintf(confTemplate, pub, ts.URL, `
4220 resolver_tls {
4221 cert_file: "../test/configs/certs/client-cert.pem"
4222 key_file: "../test/configs/certs/client-key.pem"
4223 ca_file: "../test/configs/certs/ca.pem"
4224 }
4225 `)))
4226 
4227 s, _ := RunServerWithConfig(conf)
4228 defer s.Shutdown()
4229 
4230 changeCurrentConfigContentWithNewContent(t, conf, []byte(fmt.Sprintf(confTemplate, pub, ts.URL, "")))
4231 if err := s.Reload(); err != nil {
4232 t.Fatalf("Error during reload: %v", err)
4233 }
4234 
4235 if _, err := s.LookupAccount(apub); err == nil {
4236 t.Fatal("Expected error during lookup, did not get one")
4237 }
4238 
4239 changeCurrentConfigContentWithNewContent(t, conf, []byte(fmt.Sprintf(confTemplate, pub, ts.URL, `
4240 resolver_tls {
4241 insecure: true
4242 }
4243 `)))
4244 if err := s.Reload(); err != nil {
4245 t.Fatalf("Error during reload: %v", err)
4246 }
4247 
4248 acc, err := s.LookupAccount(apub)
4249 if err != nil {
4250 t.Fatalf("Error during lookup: %v", err)
4251 }
4252 if acc == nil {
4253 t.Fatalf("Expected to receive an account")
4254 }
4255 if acc.Name != apub {
4256 t.Fatalf("Account name did not match claim key")
4257 }
4258 }
4259 
4260 func TestConfigReloadLogging(t *testing.T) {
4261 // This test basically starts a server and causes its configuration to be reloaded 3 times.
4262 // Each time, a new log file is created and trace levels are turned off - on - off.
4263 
4264 // At the end of the test, all 3 log files are inspected for certain traces.
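// countMatches reports how many of the given statements appear at least once
// in the supplied log contents.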
4265 countMatches := func(log []byte, stmts ...string) int {
4266 matchCnt := 0
4267 for _, stmt := range stmts {
4268 if strings.Contains(string(log), stmt) {
4269 matchCnt++
4270 }
4271 }
4272 return matchCnt
4273 }
4274 
4275 traces := []string{"[TRC]", "[DBG]", "SYSTEM", "MSG_PAYLOAD", "$SYS.SERVER.ACCOUNT"}
4276 
4277 didTrace := func(log []byte) bool {
4278 return countMatches(log, "[INF] Reloaded server configuration") == 1
4279 }
4280 
4281 tracingAbsent := func(log []byte) bool {
4282 return countMatches(log, traces...) == 0 && didTrace(log)
4283 }
4284 
4285 tracingPresent := func(log []byte) bool {
4286 return len(traces) == countMatches(log, traces...) && didTrace(log)
4287 }
4288 
4289 check := func(filename string, valid func([]byte) bool) {
4290 t.Helper()
4291 log, err := os.ReadFile(filename)
4292 if err != nil {
4293 t.Fatalf("Error reading log file %s: %v\n", filename, err)
4294 }
4295 if !valid(log) {
4296 t.Fatalf("%s is not valid: %s", filename, log)
4297 }
4298 //t.Logf("%s contains: %s\n", filename, log)
4299 }
4300 
4301 // Common configuration setting up system accounts. trace_verbose needs this to cause traces.
4302 commonCfg := `
4303 port: -1
4304 system_account: sys
4305 accounts {
4306 sys { users = [ {user: sys, pass: "" } ] }
4307 nats.io: { users = [ { user : bar, pass: "pwd" } ] }
4308 }
4309 `
4310 
4311 conf := createConfFile(t, []byte(commonCfg))
4312 
4313 defer removeFile(t, "off-pre.log")
4314 defer removeFile(t, "on.log")
4315 defer removeFile(t, "off-post.log")
4316 
4317 s, opts := RunServerWithConfig(conf)
4318 defer s.Shutdown()
4319 
4320 reload := func(change string) {
4321 t.Helper()
4322 changeCurrentConfigContentWithNewContent(t, conf, []byte(commonCfg+`
4323 `+change+`
4324 `))
4325 
4326 if err := s.Reload(); err != nil {
4327 t.Fatalf("Error during reload: %v", err)
4328 }
4329 }
4330 
4331 traffic := func(cnt int) {
4332 t.Helper()
4333 // Create client and sub interest on server and create traffic
4334 urlSeed := fmt.Sprintf("nats://bar:pwd@%s:%d/", opts.Host, opts.Port)
4335 nc, err := nats.Connect(urlSeed)
4336 if err != nil {
4337 t.Fatalf("Error creating client: %v\n", err)
4338 }
4339 defer nc.Close()
4340 
4341 msgs := make(chan *nats.Msg, 1)
4342 defer close(msgs)
4343 
4344 sub, err := nc.ChanSubscribe("foo", msgs)
4345 if err != nil {
4346 t.Fatalf("Error creating subscriber: %v\n", err)
4347 }
4348 
4349 nc.Flush()
4350 
4351 for i := 0; i < cnt; i++ {
4352 if err := nc.Publish("foo", []byte("bar")); err == nil {
4353 <-msgs
4354 }
4355 }
4356 
4357 sub.Unsubscribe()
4358 nc.Close()
4359 }
4360 
4361 reload("log_file: off-pre.log")
4362 
4363 traffic(10) // generate NO trace/debug entries in off-pre.log
4364 
4365 reload(`
4366 log_file: on.log
4367 debug: true
4368 trace_verbose: true
4369 `)
4370 
4371 traffic(10) // generate trace/debug entries in on.log
4372 
4373 reload(`
4374 log_file: off-post.log
4375 debug: false
4376 trace_verbose: false
4377 `)
4378 
4379 traffic(10) // generate NO trace/debug entries in off-post.log
4380 
4381 // check resulting log files for expected content
4382 check("off-pre.log", tracingAbsent)
4383 check("on.log", tracingPresent)
4384 check("off-post.log", tracingAbsent)
4385 }
4386 
4387 func TestConfigReloadValidate(t *testing.T) {
4388 confFileName := createConfFile(t, []byte(`
4389 listen: "127.0.0.1:-1"
4390 no_auth_user: a
4391 authorization {
4392 users [
4393 {user: "a", password: "a"},
4394 {user: "b", password: "b"}
4395 ]
4396 }
4397 `))
4398 srv, _ := RunServerWithConfig(confFileName)
4399 if srv == nil {
4400 t.Fatal("Server did not start") 4401 } 4402 // Induce error by removing the user no_auth_user points to 4403 changeCurrentConfigContentWithNewContent(t, confFileName, []byte(` 4404 listen: "127.0.0.1:-1" 4405 no_auth_user: a 4406 authorization { 4407 users [ 4408 {user: "b", password: "b"} 4409 ] 4410 } 4411 `)) 4412 if err := srv.Reload(); err == nil { 4413 t.Fatal("Expected error on reload, got none") 4414 } else if strings.HasPrefix(err.Error(), " no_auth_user:") { 4415 t.Logf("Expected no_auth_user error, got different one %s", err) 4416 } 4417 srv.Shutdown() 4418 } 4419 4420 func TestConfigReloadAccounts(t *testing.T) { 4421 conf := createConfFile(t, []byte(` 4422 listen: "127.0.0.1:-1" 4423 system_account: SYS 4424 accounts { 4425 SYS { 4426 users = [ 4427 {user: sys, password: pwd} 4428 ] 4429 } 4430 ACC { 4431 users = [ 4432 {user: usr, password: pwd} 4433 ] 4434 } 4435 acc_deleted_after_reload_will_trigger_reload_of_all_accounts { 4436 users = [ 4437 {user: notused, password: soon} 4438 ] 4439 } 4440 } 4441 `)) 4442 s, o := RunServerWithConfig(conf) 4443 defer s.Shutdown() 4444 4445 urlSys := fmt.Sprintf("nats://sys:pwd@%s:%d", o.Host, o.Port) 4446 urlUsr := fmt.Sprintf("nats://usr:pwd@%s:%d", o.Host, o.Port) 4447 oldAcci, ok := s.accounts.Load("SYS") 4448 if !ok { 4449 t.Fatal("No SYS account") 4450 } 4451 oldAcc := oldAcci.(*Account) 4452 4453 testSrvState := func(oldAcc *Account) { 4454 t.Helper() 4455 sysAcc := s.SystemAccount() 4456 s.mu.Lock() 4457 defer s.mu.Unlock() 4458 if s.sys == nil || sysAcc == nil { 4459 t.Fatal("Expected sys.account to be non-nil") 4460 } 4461 if sysAcc.Name != "SYS" { 4462 t.Fatal("Found wrong sys.account") 4463 } 4464 if s.opts.SystemAccount != "SYS" { 4465 t.Fatal("Found wrong sys.account") 4466 } 4467 ai, ok := s.accounts.Load(s.opts.SystemAccount) 4468 if !ok { 4469 t.Fatalf("System account %q not found in s.accounts map", s.opts.SystemAccount) 4470 } 4471 acc := ai.(*Account) 4472 if acc != oldAcc { 4473 t.Fatalf("System account pointer was changed during reload, was %p now %p", oldAcc, acc) 4474 } 4475 if s.sys.client == nil { 4476 t.Fatal("Expected sys.client to be non-nil") 4477 } 4478 s.sys.client.mu.Lock() 4479 defer s.sys.client.mu.Unlock() 4480 if s.sys.client.acc.Name != "SYS" { 4481 t.Fatal("Found wrong sys.account") 4482 } 4483 if s.sys.client.echo { 4484 t.Fatal("Internal clients should always have echo false") 4485 } 4486 s.sys.account.mu.Lock() 4487 if _, ok := s.sys.account.clients[s.sys.client]; !ok { 4488 s.sys.account.mu.Unlock() 4489 t.Fatal("internal client not present") 4490 } 4491 s.sys.account.mu.Unlock() 4492 } 4493 4494 // Below tests use connection names so that they can be checked for. 4495 // The test subscribes to ACC only. This avoids receiving own messages. 
4496 subscribe := func(name string) (*nats.Conn, *nats.Subscription, *nats.Subscription) { 4497 t.Helper() 4498 c, err := nats.Connect(urlSys, nats.Name(name)) 4499 if err != nil { 4500 t.Fatalf("Error on connect: %v", err) 4501 } 4502 subCon, err := c.SubscribeSync("$SYS.ACCOUNT.ACC.CONNECT") 4503 if err != nil { 4504 t.Fatalf("Error on subscribe CONNECT: %v", err) 4505 } 4506 subDis, err := c.SubscribeSync("$SYS.ACCOUNT.ACC.DISCONNECT") 4507 if err != nil { 4508 t.Fatalf("Error on subscribe DISCONNECT: %v", err) 4509 } 4510 c.Flush() 4511 return c, subCon, subDis 4512 } 4513 recv := func(name string, sub *nats.Subscription) { 4514 t.Helper() 4515 if msg, err := sub.NextMsg(1 * time.Second); err != nil { 4516 t.Fatalf("%s Error on next: %v", name, err) 4517 } else { 4518 cMsg := ConnectEventMsg{} 4519 json.Unmarshal(msg.Data, &cMsg) 4520 if cMsg.Client.Name != name { 4521 t.Fatalf("%s wrong message: %s", name, string(msg.Data)) 4522 } 4523 } 4524 } 4525 triggerSysEvent := func(name string, subs []*nats.Subscription) { 4526 t.Helper() 4527 ncs1, err := nats.Connect(urlUsr, nats.Name(name)) 4528 if err != nil { 4529 t.Fatalf("Error on connect: %v", err) 4530 } 4531 ncs1.Close() 4532 for _, sub := range subs { 4533 recv(name, sub) 4534 // Make sure they are empty. 4535 if pending, _, _ := sub.Pending(); pending != 0 { 4536 t.Fatalf("Expected no pending, got %d for %+v", pending, sub) 4537 } 4538 } 4539 } 4540 4541 testSrvState(oldAcc) 4542 c1, s1C, s1D := subscribe("SYS1") 4543 defer c1.Close() 4544 defer s1C.Unsubscribe() 4545 defer s1D.Unsubscribe() 4546 triggerSysEvent("BEFORE1", []*nats.Subscription{s1C, s1D}) 4547 triggerSysEvent("BEFORE2", []*nats.Subscription{s1C, s1D}) 4548 4549 // Remove account to trigger account reload 4550 reloadUpdateConfig(t, s, conf, ` 4551 listen: "127.0.0.1:-1" 4552 system_account: SYS 4553 accounts { 4554 SYS { 4555 users = [ 4556 {user: sys, password: pwd} 4557 ] 4558 } 4559 ACC { 4560 users = [ 4561 {user: usr, password: pwd} 4562 ] 4563 } 4564 } 4565 `) 4566 4567 testSrvState(oldAcc) 4568 c2, s2C, s2D := subscribe("SYS2") 4569 defer c2.Close() 4570 defer s2C.Unsubscribe() 4571 defer s2D.Unsubscribe() 4572 // test new and existing subscriptions 4573 triggerSysEvent("AFTER1", []*nats.Subscription{s1C, s1D, s2C, s2D}) 4574 triggerSysEvent("AFTER2", []*nats.Subscription{s1C, s1D, s2C, s2D}) 4575 } 4576 4577 func TestConfigReloadDefaultSystemAccount(t *testing.T) { 4578 conf := createConfFile(t, []byte(` 4579 listen: "127.0.0.1:-1" 4580 accounts { 4581 ACC { 4582 users = [ 4583 {user: usr, password: pwd} 4584 ] 4585 } 4586 } 4587 `)) 4588 s, _ := RunServerWithConfig(conf) 4589 defer s.Shutdown() 4590 4591 sysAcc := s.SystemAccount() 4592 if sysAcc == nil { 4593 t.Fatalf("Expected system account to be present") 4594 } 4595 numSubs := sysAcc.TotalSubs() 4596 4597 sname := sysAcc.GetName() 4598 testInAccounts := func() { 4599 t.Helper() 4600 var found bool 4601 s.accounts.Range(func(k, v interface{}) bool { 4602 acc := v.(*Account) 4603 if acc.GetName() == sname { 4604 found = true 4605 return false 4606 } 4607 return true 4608 }) 4609 if !found { 4610 t.Fatalf("System account not found in accounts list") 4611 } 4612 } 4613 testInAccounts() 4614 4615 if err := s.Reload(); err != nil { 4616 t.Fatalf("Unexpected error reloading: %v", err) 4617 } 4618 4619 sysAcc = s.SystemAccount() 4620 if sysAcc == nil { 4621 t.Fatalf("Expected system account to still be present") 4622 } 4623 if sysAcc.TotalSubs() != numSubs { 4624 t.Fatalf("Expected %d subs, got %d", numSubs, 
sysAcc.TotalSubs()) 4625 } 4626 testInAccounts() 4627 } 4628 4629 func TestConfigReloadAccountMappings(t *testing.T) { 4630 conf := createConfFile(t, []byte(` 4631 listen: "127.0.0.1:-1" 4632 accounts { 4633 ACC { 4634 users = [{user: usr, password: pwd}] 4635 mappings = { foo: bar } 4636 } 4637 } 4638 `)) 4639 s, opts := RunServerWithConfig(conf) 4640 defer s.Shutdown() 4641 4642 reloadUpdateConfig(t, s, conf, ` 4643 listen: "127.0.0.1:-1" 4644 accounts { 4645 ACC { 4646 users = [{user: usr, password: pwd}] 4647 mappings = { foo: baz } 4648 } 4649 } 4650 `) 4651 4652 nc := natsConnect(t, fmt.Sprintf("nats://usr:pwd@%s:%d", opts.Host, opts.Port)) 4653 defer nc.Close() 4654 4655 fsub, _ := nc.SubscribeSync("foo") 4656 sub, _ := nc.SubscribeSync("baz") 4657 nc.Publish("foo", nil) 4658 nc.Flush() 4659 4660 checkPending := func(sub *nats.Subscription, expected int) { 4661 t.Helper() 4662 if n, _, _ := sub.Pending(); n != expected { 4663 t.Fatalf("Expected %d msgs for %q, but got %d", expected, sub.Subject, n) 4664 } 4665 } 4666 checkPending(fsub, 0) 4667 checkPending(sub, 1) 4668 4669 // Drain it off 4670 if _, err := sub.NextMsg(2 * time.Second); err != nil { 4671 t.Fatalf("Error receiving msg: %v", err) 4672 } 4673 4674 reloadUpdateConfig(t, s, conf, ` 4675 listen: "127.0.0.1:-1" 4676 accounts { 4677 ACC { 4678 users = [{user: usr, password: pwd}] 4679 } 4680 } 4681 `) 4682 4683 nc.Publish("foo", nil) 4684 nc.Flush() 4685 4686 checkPending(fsub, 1) 4687 checkPending(sub, 0) 4688 } 4689 4690 func TestConfigReloadWithSysAccountOnly(t *testing.T) { 4691 conf := createConfFile(t, []byte(` 4692 listen: "127.0.0.1:-1" 4693 accounts { 4694 $SYS { 4695 users = [{user: "system",pass: "password"}, {user: "system2",pass: "password2"}] 4696 } 4697 } 4698 `)) 4699 defer os.Remove(conf) 4700 s, _ := RunServerWithConfig(conf) 4701 defer s.Shutdown() 4702 4703 dch := make(chan struct{}, 1) 4704 nc := natsConnect(t, 4705 s.ClientURL(), 4706 nats.DisconnectErrHandler(func(_ *nats.Conn, _ error) { 4707 dch <- struct{}{} 4708 }), 4709 nats.NoCallbacksAfterClientClose()) 4710 defer nc.Close() 4711 4712 // Just reload... 
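// A reload with no effective configuration change should leave the existing client
// connection untouched.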
4713 if err := s.Reload(); err != nil { 4714 t.Fatalf("Error on reload: %v", err) 4715 } 4716 4717 // Make sure we did not get disconnected 4718 select { 4719 case <-dch: 4720 t.Fatal("Got disconnected!") 4721 case <-time.After(500 * time.Millisecond): 4722 // ok 4723 } 4724 } 4725 4726 func TestConfigReloadRouteImportPermissionsWithAccounts(t *testing.T) { 4727 for _, test := range []struct { 4728 name string 4729 poolSize string 4730 accounts string 4731 }{ 4732 {"regular", "pool_size: -1", _EMPTY_}, 4733 {"pooling", "pool_size: 5", _EMPTY_}, 4734 {"per-account", _EMPTY_, "accounts: [\"A\"]"}, 4735 {"pool and per-account", "pool_size: 3", "accounts: [\"A\"]"}, 4736 } { 4737 t.Run("import "+test.name, func(t *testing.T) { 4738 confATemplate := ` 4739 server_name: "A" 4740 port: -1 4741 accounts { 4742 A { users: [{user: "user1", password: "pwd"}] } 4743 B { users: [{user: "user2", password: "pwd"}] } 4744 C { users: [{user: "user3", password: "pwd"}] } 4745 D { users: [{user: "user4", password: "pwd"}] } 4746 } 4747 cluster { 4748 name: "local" 4749 listen: 127.0.0.1:-1 4750 permissions { 4751 import { 4752 allow: %s 4753 } 4754 export { 4755 allow: ">" 4756 } 4757 } 4758 %s 4759 %s 4760 } 4761 ` 4762 confA := createConfFile(t, []byte(fmt.Sprintf(confATemplate, `"foo"`, test.poolSize, test.accounts))) 4763 srva, optsA := RunServerWithConfig(confA) 4764 defer srva.Shutdown() 4765 4766 confBTemplate := ` 4767 server_name: "B" 4768 port: -1 4769 accounts { 4770 A { users: [{user: "user1", password: "pwd"}] } 4771 B { users: [{user: "user2", password: "pwd"}] } 4772 C { users: [{user: "user3", password: "pwd"}] } 4773 D { users: [{user: "user4", password: "pwd"}] } 4774 } 4775 cluster { 4776 listen: 127.0.0.1:-1 4777 name: "local" 4778 permissions { 4779 import { 4780 allow: %s 4781 } 4782 export { 4783 allow: ">" 4784 } 4785 } 4786 routes = [ 4787 "nats://127.0.0.1:%d" 4788 ] 4789 %s 4790 %s 4791 } 4792 ` 4793 confB := createConfFile(t, []byte(fmt.Sprintf(confBTemplate, `"foo"`, optsA.Cluster.Port, test.poolSize, test.accounts))) 4794 srvb, _ := RunServerWithConfig(confB) 4795 defer srvb.Shutdown() 4796 4797 checkClusterFormed(t, srva, srvb) 4798 4799 ncA := natsConnect(t, srva.ClientURL(), nats.UserInfo("user1", "pwd")) 4800 defer ncA.Close() 4801 4802 sub1Foo := natsSubSync(t, ncA, "foo") 4803 sub2Foo := natsSubSync(t, ncA, "foo") 4804 4805 sub1Bar := natsSubSync(t, ncA, "bar") 4806 sub2Bar := natsSubSync(t, ncA, "bar") 4807 4808 natsFlush(t, ncA) 4809 4810 checkSubInterest(t, srvb, "A", "foo", 2*time.Second) 4811 checkSubNoInterest(t, srvb, "A", "bar", 2*time.Second) 4812 4813 ncB := natsConnect(t, srvb.ClientURL(), nats.UserInfo("user1", "pwd")) 4814 defer ncB.Close() 4815 4816 check := func(sub *nats.Subscription, expected bool) { 4817 t.Helper() 4818 if expected { 4819 natsNexMsg(t, sub, time.Second) 4820 } else { 4821 if msg, err := sub.NextMsg(50 * time.Millisecond); err == nil { 4822 t.Fatalf("Should not have gotten the message, got %s/%s", msg.Subject, msg.Data) 4823 } 4824 } 4825 } 4826 4827 // Should receive on "foo" 4828 natsPub(t, ncB, "foo", []byte("foo1")) 4829 check(sub1Foo, true) 4830 check(sub2Foo, true) 4831 4832 // But not on "bar" 4833 natsPub(t, ncB, "bar", []byte("bar1")) 4834 check(sub1Bar, false) 4835 check(sub2Bar, false) 4836 4837 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, `"bar"`, test.poolSize, test.accounts)) 4838 reloadUpdateConfig(t, srvb, confB, fmt.Sprintf(confBTemplate, `"bar"`, optsA.Cluster.Port, test.poolSize, test.accounts)) 4839 
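// With the route import permission switched from "foo" to "bar" on both servers,
// the cluster should reform and only "bar" interest should now be propagated.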
4840 checkClusterFormed(t, srva, srvb) 4841 4842 checkSubNoInterest(t, srvb, "A", "foo", 2*time.Second) 4843 checkSubInterest(t, srvb, "A", "bar", 2*time.Second) 4844 4845 // Should not receive on foo 4846 natsPub(t, ncB, "foo", []byte("foo2")) 4847 check(sub1Foo, false) 4848 check(sub2Foo, false) 4849 4850 // Should be able to receive on bar 4851 natsPub(t, ncB, "bar", []byte("bar2")) 4852 check(sub1Bar, true) 4853 check(sub2Bar, true) 4854 4855 // Restore "foo" 4856 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, `"foo"`, test.poolSize, test.accounts)) 4857 reloadUpdateConfig(t, srvb, confB, fmt.Sprintf(confBTemplate, `"foo"`, optsA.Cluster.Port, test.poolSize, test.accounts)) 4858 4859 checkClusterFormed(t, srva, srvb) 4860 4861 checkSubInterest(t, srvb, "A", "foo", 2*time.Second) 4862 checkSubNoInterest(t, srvb, "A", "bar", 2*time.Second) 4863 4864 // Should receive on "foo" 4865 natsPub(t, ncB, "foo", []byte("foo3")) 4866 check(sub1Foo, true) 4867 check(sub2Foo, true) 4868 // But make sure there are no more than what we expect 4869 check(sub1Foo, false) 4870 check(sub2Foo, false) 4871 4872 // And now "bar" should fail 4873 natsPub(t, ncB, "bar", []byte("bar3")) 4874 check(sub1Bar, false) 4875 check(sub2Bar, false) 4876 }) 4877 } 4878 // Check export now 4879 for _, test := range []struct { 4880 name string 4881 poolSize string 4882 accounts string 4883 }{ 4884 {"regular", "pool_size: -1", _EMPTY_}, 4885 {"pooling", "pool_size: 5", _EMPTY_}, 4886 {"per-account", _EMPTY_, "accounts: [\"A\"]"}, 4887 {"pool and per-account", "pool_size: 3", "accounts: [\"A\"]"}, 4888 } { 4889 t.Run("export "+test.name, func(t *testing.T) { 4890 confATemplate := ` 4891 server_name: "A" 4892 port: -1 4893 accounts { 4894 A { users: [{user: "user1", password: "pwd"}] } 4895 B { users: [{user: "user2", password: "pwd"}] } 4896 C { users: [{user: "user3", password: "pwd"}] } 4897 D { users: [{user: "user4", password: "pwd"}] } 4898 } 4899 cluster { 4900 name: "local" 4901 listen: 127.0.0.1:-1 4902 permissions { 4903 import { 4904 allow: ">" 4905 } 4906 export { 4907 allow: %s 4908 } 4909 } 4910 %s 4911 %s 4912 } 4913 ` 4914 confA := createConfFile(t, []byte(fmt.Sprintf(confATemplate, `"foo"`, test.poolSize, test.accounts))) 4915 srva, optsA := RunServerWithConfig(confA) 4916 defer srva.Shutdown() 4917 4918 confBTemplate := ` 4919 server_name: "B" 4920 port: -1 4921 accounts { 4922 A { users: [{user: "user1", password: "pwd"}] } 4923 B { users: [{user: "user2", password: "pwd"}] } 4924 C { users: [{user: "user3", password: "pwd"}] } 4925 D { users: [{user: "user4", password: "pwd"}] } 4926 } 4927 cluster { 4928 listen: 127.0.0.1:-1 4929 name: "local" 4930 permissions { 4931 import { 4932 allow: ">" 4933 } 4934 export { 4935 allow: %s 4936 } 4937 } 4938 routes = [ 4939 "nats://127.0.0.1:%d" 4940 ] 4941 %s 4942 %s 4943 } 4944 ` 4945 confB := createConfFile(t, []byte(fmt.Sprintf(confBTemplate, `"foo"`, optsA.Cluster.Port, test.poolSize, test.accounts))) 4946 srvb, _ := RunServerWithConfig(confB) 4947 defer srvb.Shutdown() 4948 4949 checkClusterFormed(t, srva, srvb) 4950 4951 ncA := natsConnect(t, srva.ClientURL(), nats.UserInfo("user1", "pwd")) 4952 defer ncA.Close() 4953 4954 sub1Foo := natsSubSync(t, ncA, "foo") 4955 sub2Foo := natsSubSync(t, ncA, "foo") 4956 4957 sub1Bar := natsSubSync(t, ncA, "bar") 4958 sub2Bar := natsSubSync(t, ncA, "bar") 4959 4960 natsFlush(t, ncA) 4961 4962 checkSubInterest(t, srvb, "A", "foo", 2*time.Second) 4963 checkSubNoInterest(t, srvb, "A", "bar", 2*time.Second) 4964 
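// Publish from server B: with only "foo" exported, messages on "foo" should cross
// the route while messages on "bar" should not.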
4965 ncB := natsConnect(t, srvb.ClientURL(), nats.UserInfo("user1", "pwd")) 4966 defer ncB.Close() 4967 4968 check := func(sub *nats.Subscription, expected bool) { 4969 t.Helper() 4970 if expected { 4971 natsNexMsg(t, sub, time.Second) 4972 } else { 4973 if msg, err := sub.NextMsg(50 * time.Millisecond); err == nil { 4974 t.Fatalf("Should not have gotten the message, got %s/%s", msg.Subject, msg.Data) 4975 } 4976 } 4977 } 4978 4979 // Should receive on "foo" 4980 natsPub(t, ncB, "foo", []byte("foo1")) 4981 check(sub1Foo, true) 4982 check(sub2Foo, true) 4983 4984 // But not on "bar" 4985 natsPub(t, ncB, "bar", []byte("bar1")) 4986 check(sub1Bar, false) 4987 check(sub2Bar, false) 4988 4989 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, `["foo", "bar"]`, test.poolSize, test.accounts)) 4990 reloadUpdateConfig(t, srvb, confB, fmt.Sprintf(confBTemplate, `["foo", "bar"]`, optsA.Cluster.Port, test.poolSize, test.accounts)) 4991 4992 checkClusterFormed(t, srva, srvb) 4993 4994 checkSubInterest(t, srvb, "A", "foo", 2*time.Second) 4995 checkSubInterest(t, srvb, "A", "bar", 2*time.Second) 4996 4997 // Should receive on foo and bar 4998 natsPub(t, ncB, "foo", []byte("foo2")) 4999 check(sub1Foo, true) 5000 check(sub2Foo, true) 5001 5002 natsPub(t, ncB, "bar", []byte("bar2")) 5003 check(sub1Bar, true) 5004 check(sub2Bar, true) 5005 5006 // Remove "bar" 5007 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, `"foo"`, test.poolSize, test.accounts)) 5008 reloadUpdateConfig(t, srvb, confB, fmt.Sprintf(confBTemplate, `"foo"`, optsA.Cluster.Port, test.poolSize, test.accounts)) 5009 5010 checkClusterFormed(t, srva, srvb) 5011 5012 checkSubInterest(t, srvb, "A", "foo", 2*time.Second) 5013 checkSubNoInterest(t, srvb, "A", "bar", 2*time.Second) 5014 5015 // Should receive on "foo" 5016 natsPub(t, ncB, "foo", []byte("foo3")) 5017 check(sub1Foo, true) 5018 check(sub2Foo, true) 5019 // But make sure there are no more than what we expect 5020 check(sub1Foo, false) 5021 check(sub2Foo, false) 5022 5023 // And now "bar" should fail 5024 natsPub(t, ncB, "bar", []byte("bar3")) 5025 check(sub1Bar, false) 5026 check(sub2Bar, false) 5027 }) 5028 } 5029 } 5030 5031 func TestConfigReloadRoutePoolAndPerAccount(t *testing.T) { 5032 confATemplate := ` 5033 port: -1 5034 server_name: "A" 5035 accounts { 5036 A { users: [{user: "user1", password: "pwd"}] } 5037 B { users: [{user: "user2", password: "pwd"}] } 5038 C { users: [{user: "user3", password: "pwd"}] } 5039 D { users: [{user: "user4", password: "pwd"}] } 5040 } 5041 cluster { 5042 name: "local" 5043 listen: 127.0.0.1:-1 5044 %s 5045 %s 5046 } 5047 ` 5048 confA := createConfFile(t, []byte(fmt.Sprintf(confATemplate, "pool_size: 3", "accounts: [\"A\"]"))) 5049 srva, optsA := RunServerWithConfig(confA) 5050 defer srva.Shutdown() 5051 5052 confBCTemplate := ` 5053 port: -1 5054 server_name: "%s" 5055 accounts { 5056 A { users: [{user: "user1", password: "pwd"}] } 5057 B { users: [{user: "user2", password: "pwd"}] } 5058 C { users: [{user: "user3", password: "pwd"}] } 5059 D { users: [{user: "user4", password: "pwd"}] } 5060 } 5061 cluster { 5062 listen: 127.0.0.1:-1 5063 name: "local" 5064 routes = [ 5065 "nats://127.0.0.1:%d" 5066 ] 5067 %s 5068 %s 5069 } 5070 ` 5071 confB := createConfFile(t, []byte(fmt.Sprintf(confBCTemplate, "B", optsA.Cluster.Port, "pool_size: 3", "accounts: [\"A\"]"))) 5072 srvb, _ := RunServerWithConfig(confB) 5073 defer srvb.Shutdown() 5074 confC := createConfFile(t, []byte(fmt.Sprintf(confBCTemplate, "C", optsA.Cluster.Port, 
"pool_size: 3", "accounts: [\"A\"]"))) 5075 srvc, _ := RunServerWithConfig(confC) 5076 defer srvc.Shutdown() 5077 5078 checkClusterFormed(t, srva, srvb, srvc) 5079 5080 // We will also create subscriptions for accounts A, B and C on all sides 5081 // just to make sure that interest is properly propagated after a reload. 5082 // The conns slices will contain connections for accounts A on srva, srvb, 5083 // srvc, then B on srva, srvb, etc.. and the subs slices will contain 5084 // subscriptions for account A on foo on srva, bar on srvb, baz on srvc, 5085 // then for account B on foo on srva, etc... 5086 var conns []*nats.Conn 5087 var subs []*nats.Subscription 5088 for _, user := range []string{"user1", "user2", "user3"} { 5089 nc := natsConnect(t, srva.ClientURL(), nats.UserInfo(user, "pwd")) 5090 defer nc.Close() 5091 conns = append(conns, nc) 5092 sub := natsSubSync(t, nc, "foo") 5093 subs = append(subs, sub) 5094 nc = natsConnect(t, srvb.ClientURL(), nats.UserInfo(user, "pwd")) 5095 defer nc.Close() 5096 conns = append(conns, nc) 5097 sub = natsSubSync(t, nc, "bar") 5098 subs = append(subs, sub) 5099 nc = natsConnect(t, srvc.ClientURL(), nats.UserInfo(user, "pwd")) 5100 defer nc.Close() 5101 conns = append(conns, nc) 5102 sub = natsSubSync(t, nc, "baz") 5103 subs = append(subs, sub) 5104 } 5105 5106 checkCluster := func() { 5107 t.Helper() 5108 checkClusterFormed(t, srva, srvb, srvc) 5109 5110 for _, acc := range []string{"A", "B", "C"} { 5111 // On server A, there should be interest for bar/baz 5112 checkSubInterest(t, srva, acc, "bar", 2*time.Second) 5113 checkSubInterest(t, srva, acc, "baz", 2*time.Second) 5114 // On serer B, there should be interest on foo/baz 5115 checkSubInterest(t, srvb, acc, "foo", 2*time.Second) 5116 checkSubInterest(t, srvb, acc, "baz", 2*time.Second) 5117 // And on server C, interest on foo/bar 5118 checkSubInterest(t, srvc, acc, "foo", 2*time.Second) 5119 checkSubInterest(t, srvc, acc, "bar", 2*time.Second) 5120 } 5121 } 5122 checkCluster() 5123 5124 getAccRouteID := func(acc string) uint64 { 5125 s := srva 5126 var id uint64 5127 srvbId := srvb.ID() 5128 s.mu.RLock() 5129 if remotes, ok := s.accRoutes[acc]; ok { 5130 // For this test, we will take a single remote, say srvb 5131 if r := remotes[srvbId]; r != nil { 5132 r.mu.Lock() 5133 if string(r.route.accName) == acc { 5134 id = r.cid 5135 } 5136 r.mu.Unlock() 5137 } 5138 } 5139 s.mu.RUnlock() 5140 return id 5141 } 5142 // Capture the route for account "A" 5143 raid := getAccRouteID("A") 5144 if raid == 0 { 5145 t.Fatal("Did not find route for account A") 5146 } 5147 5148 getRouteIDForAcc := func(acc string) uint64 { 5149 s := srva 5150 a, _ := s.LookupAccount(acc) 5151 if a == nil { 5152 return 0 5153 } 5154 a.mu.RLock() 5155 pidx := a.routePoolIdx 5156 a.mu.RUnlock() 5157 var id uint64 5158 s.mu.RLock() 5159 // For this test, we will take a single remote, say srvb 5160 srvbId := srvb.ID() 5161 if conns, ok := s.routes[srvbId]; ok { 5162 if r := conns[pidx]; r != nil { 5163 r.mu.Lock() 5164 id = r.cid 5165 r.mu.Unlock() 5166 } 5167 } 5168 s.mu.RUnlock() 5169 return id 5170 } 5171 rbid := getRouteIDForAcc("B") 5172 if rbid == 0 { 5173 t.Fatal("Did not find route for account B") 5174 } 5175 rcid := getRouteIDForAcc("C") 5176 if rcid == 0 { 5177 t.Fatal("Did not find route for account C") 5178 } 5179 rdid := getRouteIDForAcc("D") 5180 if rdid == 0 { 5181 t.Fatal("Did not find route for account D") 5182 } 5183 5184 sendAndRecv := func(msg string) { 5185 t.Helper() 5186 for accIdx := 0; accIdx < 9; accIdx += 3 
{ 5187 natsPub(t, conns[accIdx], "bar", []byte(msg)) 5188 m := natsNexMsg(t, subs[accIdx+1], time.Second) 5189 checkMsg := func(m *nats.Msg, subj string) { 5190 t.Helper() 5191 if string(m.Data) != msg { 5192 t.Fatalf("For accIdx=%v, subject %q, expected message %q, got %q", accIdx, subj, msg, m.Data) 5193 } 5194 } 5195 checkMsg(m, "bar") 5196 natsPub(t, conns[accIdx+1], "baz", []byte(msg)) 5197 m = natsNexMsg(t, subs[accIdx+2], time.Second) 5198 checkMsg(m, "baz") 5199 natsPub(t, conns[accIdx+2], "foo", []byte(msg)) 5200 m = natsNexMsg(t, subs[accIdx], time.Second) 5201 checkMsg(m, "foo") 5202 } 5203 } 5204 sendAndRecv("0") 5205 5206 // Now add accounts "B" and "D" and do a config reload. 5207 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, "pool_size: 3", "accounts: [\"A\",\"B\",\"D\"]")) 5208 5209 // Even before reloading srvb and srvc, we should already have per-account 5210 // routes for accounts B and D being established. The accounts routePoolIdx 5211 // should be marked as transitioning. 5212 checkAccPoolIdx := func(s *Server, acc string, expected int) { 5213 t.Helper() 5214 checkFor(t, 2*time.Second, 50*time.Millisecond, func() error { 5215 s.mu.RLock() 5216 defer s.mu.RUnlock() 5217 if a, ok := s.accounts.Load(acc); ok { 5218 acc := a.(*Account) 5219 acc.mu.RLock() 5220 rpi := acc.routePoolIdx 5221 acc.mu.RUnlock() 5222 if rpi != expected { 5223 return fmt.Errorf("Server %q - Account %q routePoolIdx should be %v, but is %v", s, acc, expected, rpi) 5224 } 5225 return nil 5226 } 5227 return fmt.Errorf("Server %q - Account %q not found", s, acc) 5228 }) 5229 } 5230 checkRoutePerAccAlreadyEstablished := func(s *Server, acc string) { 5231 t.Helper() 5232 checkFor(t, 2*time.Second, 50*time.Millisecond, func() error { 5233 s.mu.RLock() 5234 defer s.mu.RUnlock() 5235 if _, ok := s.accRoutes[acc]; !ok { 5236 return fmt.Errorf("Route for account %q still not established", acc) 5237 } 5238 return nil 5239 }) 5240 checkAccPoolIdx(s, acc, accTransitioningToDedicatedRoute) 5241 } 5242 // Check srvb and srvc for both accounts. 5243 for _, s := range []*Server{srvb, srvc} { 5244 for _, acc := range []string{"B", "D"} { 5245 checkRoutePerAccAlreadyEstablished(s, acc) 5246 } 5247 } 5248 // On srva, the accounts should already have their routePoolIdx set to 5249 // the accDedicatedRoute value. 5250 for _, acc := range []string{"B", "D"} { 5251 checkAccPoolIdx(srva, acc, accDedicatedRoute) 5252 } 5253 // Now reload the other servers 5254 reloadUpdateConfig(t, srvb, confB, fmt.Sprintf(confBCTemplate, "B", optsA.Cluster.Port, "pool_size: 3", "accounts: [\"A\",\"B\",\"D\"]")) 5255 reloadUpdateConfig(t, srvc, confC, fmt.Sprintf(confBCTemplate, "C", optsA.Cluster.Port, "pool_size: 3", "accounts: [\"A\",\"B\",\"D\"]")) 5256 5257 checkCluster() 5258 // Now check that the accounts B and D are no longer transitioning 5259 for _, s := range []*Server{srva, srvb, srvc} { 5260 for _, acc := range []string{"B", "D"} { 5261 checkAccPoolIdx(s, acc, accDedicatedRoute) 5262 } 5263 } 5264 5265 checkRouteForADidNotChange := func() { 5266 t.Helper() 5267 if id := getAccRouteID("A"); id != raid { 5268 t.Fatalf("Route id for account 'A' was %d, is now %d", raid, id) 5269 } 5270 } 5271 // Verify that the route for account "A" did not change. 5272 checkRouteForADidNotChange() 5273 5274 // Verify that account "B" has now its own route 5275 if id := getAccRouteID("B"); id == 0 { 5276 t.Fatal("Did not find route for account B") 5277 } 5278 // Same for "D". 
5279 if id := getAccRouteID("D"); id == 0 {
5280 t.Fatal("Did not find route for account D")
5281 }
5282 
5283 checkRouteStillPresent := func(id uint64) {
5284 t.Helper()
5285 srva.mu.RLock()
5286 defer srva.mu.RUnlock()
5287 srvbId := srvb.ID()
5288 for _, r := range srva.routes[srvbId] {
5289 if r != nil {
5290 r.mu.Lock()
5291 found := r.cid == id
5292 r.mu.Unlock()
5293 if found {
5294 return
5295 }
5296 }
5297 }
5298 t.Fatalf("Route id %v has been disconnected", id)
5299 }
5300 // Verify that routes that were dealing with "B" and "D" were not disconnected.
5301 // Of course, since "C" was not involved, that route should still be present too.
5302 checkRouteStillPresent(rbid)
5303 checkRouteStillPresent(rcid)
5304 checkRouteStillPresent(rdid)
5305 
5306 sendAndRecv("1")
5307 
5308 // Now remove "B" and "D" and verify that route for "A" did not change.
5309 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, "pool_size: 3", "accounts: [\"A\"]"))
5310 reloadUpdateConfig(t, srvb, confB, fmt.Sprintf(confBCTemplate, "B", optsA.Cluster.Port, "pool_size: 3", "accounts: [\"A\"]"))
5311 reloadUpdateConfig(t, srvc, confC, fmt.Sprintf(confBCTemplate, "C", optsA.Cluster.Port, "pool_size: 3", "accounts: [\"A\"]"))
5312 
5313 checkCluster()
5314 
5315 // Verify that the route for account "A" did not change.
5316 checkRouteForADidNotChange()
5317 
5318 // Verify that there is no dedicated route for account "B"
5319 if id := getAccRouteID("B"); id != 0 {
5320 t.Fatal("Should not have found a route for account B")
5321 }
5322 // It should instead be in one of the pooled routes, and the same
5323 // one as it was before.
5324 if id := getRouteIDForAcc("B"); id != rbid {
5325 t.Fatalf("Account B's route was %d, it is now %d", rbid, id)
5326 }
5327 // Same for "D"
5328 if id := getAccRouteID("D"); id != 0 {
5329 t.Fatal("Should not have found a route for account D")
5330 }
5331 if id := getRouteIDForAcc("D"); id != rdid {
5332 t.Fatalf("Account D's route was %d, it is now %d", rdid, id)
5333 }
5334 
5335 sendAndRecv("2")
5336 
5337 // Finally, change pool size and make sure that routes handling B, C and D
5338 // were disconnected/reconnected, and that A did not change.
5339 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, "pool_size: 5", "accounts: [\"A\"]"))
5340 reloadUpdateConfig(t, srvb, confB, fmt.Sprintf(confBCTemplate, "B", optsA.Cluster.Port, "pool_size: 5", "accounts: [\"A\"]"))
5341 reloadUpdateConfig(t, srvc, confC, fmt.Sprintf(confBCTemplate, "C", optsA.Cluster.Port, "pool_size: 5", "accounts: [\"A\"]"))
5342 
5343 checkCluster()
5344 
5345 checkRouteForADidNotChange()
5346 
5347 checkRouteDisconnected := func(acc string, oldID uint64) {
5348 t.Helper()
5349 if id := getRouteIDForAcc(acc); id == oldID {
5350 t.Fatalf("Route that was handling account %q did not change", acc)
5351 }
5352 }
5353 checkRouteDisconnected("B", rbid)
5354 checkRouteDisconnected("C", rcid)
5355 checkRouteDisconnected("D", rdid)
5356 
5357 sendAndRecv("3")
5358 
5359 // Now check that there were no duplicates and that all subs have 0 pending messages.
5360 for i, sub := range subs { 5361 if n, _, _ := sub.Pending(); n != 0 { 5362 t.Fatalf("Expected 0 pending messages, got %v for accIdx=%d sub=%q", n, i, sub.Subject) 5363 } 5364 } 5365 } 5366 5367 func TestConfigReloadRoutePoolCannotBeDisabledIfAccountsPresent(t *testing.T) { 5368 tmpl := ` 5369 port: -1 5370 server_name: "%s" 5371 accounts { 5372 A { users: [{user: "user1", password: "pwd"}] } 5373 B { users: [{user: "user2", password: "pwd"}] } 5374 } 5375 cluster { 5376 name: "local" 5377 listen: 127.0.0.1:-1 5378 %s 5379 %s 5380 %s 5381 } 5382 ` 5383 conf1 := createConfFile(t, []byte(fmt.Sprintf(tmpl, "A", "accounts: [\"A\"]", _EMPTY_, _EMPTY_))) 5384 s1, o1 := RunServerWithConfig(conf1) 5385 defer s1.Shutdown() 5386 5387 conf2 := createConfFile(t, []byte(fmt.Sprintf(tmpl, "B", "accounts: [\"A\"]", _EMPTY_, 5388 fmt.Sprintf("routes: [\"nats://127.0.0.1:%d\"]", o1.Cluster.Port)))) 5389 s2, _ := RunServerWithConfig(conf2) 5390 defer s2.Shutdown() 5391 5392 checkClusterFormed(t, s1, s2) 5393 5394 err := os.WriteFile(conf1, []byte(fmt.Sprintf(tmpl, "A", "accounts: [\"A\"]", "pool_size: -1", _EMPTY_)), 0666) 5395 require_NoError(t, err) 5396 if err := s1.Reload(); err == nil || !strings.Contains(err.Error(), "accounts") { 5397 t.Fatalf("Expected error regarding presence of accounts, got %v", err) 5398 } 5399 5400 // Now remove the accounts too and reload, this should work 5401 reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl, "A", _EMPTY_, "pool_size: -1", _EMPTY_)) 5402 reloadUpdateConfig(t, s2, conf2, fmt.Sprintf(tmpl, "B", _EMPTY_, "pool_size: -1", fmt.Sprintf("routes: [\"nats://127.0.0.1:%d\"]", o1.Cluster.Port))) 5403 checkClusterFormed(t, s1, s2) 5404 5405 ncs2 := natsConnect(t, s2.ClientURL(), nats.UserInfo("user1", "pwd")) 5406 defer ncs2.Close() 5407 sub := natsSubSync(t, ncs2, "foo") 5408 checkSubInterest(t, s1, "A", "foo", time.Second) 5409 5410 ncs1 := natsConnect(t, s1.ClientURL(), nats.UserInfo("user1", "pwd")) 5411 defer ncs1.Close() 5412 natsPub(t, ncs1, "foo", []byte("hello")) 5413 natsNexMsg(t, sub, time.Second) 5414 5415 // Wait a bit and make sure there are no duplicates 5416 time.Sleep(50 * time.Millisecond) 5417 if n, _, _ := sub.Pending(); n != 0 { 5418 t.Fatalf("Expected no pending messages, got %v", n) 5419 } 5420 5421 // Finally, verify that the system account is no longer bound to 5422 // a dedicated route. For that matter, s.accRoutes should be nil. 
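// (With pooling disabled and no per-account routes configured, even the implicitly
// created system account should report routePoolIdx 0, and there should be no
// accRoutes entries at all.)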
5423 for _, s := range []*Server{s1, s2} { 5424 sys := s.SystemAccount() 5425 if sys == nil { 5426 t.Fatal("No system account found") 5427 } 5428 sys.mu.RLock() 5429 rpi := sys.routePoolIdx 5430 sys.mu.RUnlock() 5431 if rpi != 0 { 5432 t.Fatalf("Server %q - expected account's routePoolIdx to be 0, got %v", s, rpi) 5433 } 5434 s.mu.RLock() 5435 arNil := s.accRoutes == nil 5436 s.mu.RUnlock() 5437 if !arNil { 5438 t.Fatalf("Server %q - accRoutes expected to be nil, it was not", s) 5439 } 5440 } 5441 } 5442 5443 func TestConfigReloadRoutePoolAndPerAccountWithOlderServer(t *testing.T) { 5444 confATemplate := ` 5445 port: -1 5446 server_name: "A" 5447 accounts { 5448 A { users: [{user: "user1", password: "pwd"}] } 5449 } 5450 cluster { 5451 name: "local" 5452 listen: 127.0.0.1:-1 5453 %s 5454 %s 5455 } 5456 ` 5457 confA := createConfFile(t, []byte(fmt.Sprintf(confATemplate, "pool_size: 3", _EMPTY_))) 5458 srva, optsA := RunServerWithConfig(confA) 5459 defer srva.Shutdown() 5460 5461 confBCTemplate := ` 5462 port: -1 5463 server_name: "%s" 5464 accounts { 5465 A { users: [{user: "user1", password: "pwd"}] } 5466 } 5467 cluster { 5468 listen: 127.0.0.1:-1 5469 name: "local" 5470 routes = [ 5471 "nats://127.0.0.1:%d" 5472 ] 5473 %s 5474 %s 5475 } 5476 ` 5477 confB := createConfFile(t, []byte(fmt.Sprintf(confBCTemplate, "B", optsA.Cluster.Port, "pool_size: 3", _EMPTY_))) 5478 srvb, _ := RunServerWithConfig(confB) 5479 defer srvb.Shutdown() 5480 confC := createConfFile(t, []byte(fmt.Sprintf(confBCTemplate, "C", optsA.Cluster.Port, "pool_size: -1", _EMPTY_))) 5481 srvc, _ := RunServerWithConfig(confC) 5482 defer srvc.Shutdown() 5483 5484 checkClusterFormed(t, srva, srvb, srvc) 5485 5486 // Create a connection and sub on B and C 5487 ncB := natsConnect(t, srvb.ClientURL(), nats.UserInfo("user1", "pwd")) 5488 defer ncB.Close() 5489 subB := natsSubSync(t, ncB, "foo") 5490 5491 ncC := natsConnect(t, srvc.ClientURL(), nats.UserInfo("user1", "pwd")) 5492 defer ncC.Close() 5493 subC := natsSubSync(t, ncC, "bar") 5494 5495 // Check that on server B, there is interest on "bar" for account A 5496 // (coming from server C), and on server C, there is interest on "foo" 5497 // for account A (coming from server B). 5498 checkCluster := func() { 5499 t.Helper() 5500 checkClusterFormed(t, srva, srvb, srvc) 5501 checkSubInterest(t, srvb, "A", "bar", 2*time.Second) 5502 checkSubInterest(t, srvc, "A", "foo", 2*time.Second) 5503 } 5504 checkCluster() 5505 5506 sendAndRecv := func(msg string) { 5507 t.Helper() 5508 natsPub(t, ncB, "bar", []byte(msg)) 5509 if m := natsNexMsg(t, subC, time.Second); string(m.Data) != msg { 5510 t.Fatalf("Expected message %q on %q, got %q", msg, "bar", m.Data) 5511 } 5512 natsPub(t, ncC, "foo", []byte(msg)) 5513 if m := natsNexMsg(t, subB, time.Second); string(m.Data) != msg { 5514 t.Fatalf("Expected message %q on %q, got %q", msg, "foo", m.Data) 5515 } 5516 } 5517 sendAndRecv("0") 5518 5519 // Now add account "A" and do a config reload. We do this only on 5520 // server srva and srb since server C really does not change. 
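// Server C keeps its pool_size: -1 (no pooling) configuration for the whole test,
// standing in for an older server without pooled or per-account routes.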
5521 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, "pool_size: 3", "accounts: [\"A\"]")) 5522 reloadUpdateConfig(t, srvb, confB, fmt.Sprintf(confBCTemplate, "B", optsA.Cluster.Port, "pool_size: 3", "accounts: [\"A\"]")) 5523 checkCluster() 5524 sendAndRecv("1") 5525 5526 // Remove "A" from the accounts list 5527 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, "pool_size: 3", _EMPTY_)) 5528 reloadUpdateConfig(t, srvb, confB, fmt.Sprintf(confBCTemplate, "B", optsA.Cluster.Port, "pool_size: 3", _EMPTY_)) 5529 checkCluster() 5530 sendAndRecv("2") 5531 5532 // Change the pool size 5533 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, "pool_size: 5", _EMPTY_)) 5534 reloadUpdateConfig(t, srvb, confB, fmt.Sprintf(confBCTemplate, "B", optsA.Cluster.Port, "pool_size: 5", _EMPTY_)) 5535 checkCluster() 5536 sendAndRecv("3") 5537 5538 // Add account "A" and change the pool size 5539 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, "pool_size: 4", "accounts: [\"A\"]")) 5540 reloadUpdateConfig(t, srvb, confB, fmt.Sprintf(confBCTemplate, "B", optsA.Cluster.Port, "pool_size: 4", "accounts: [\"A\"]")) 5541 checkCluster() 5542 sendAndRecv("4") 5543 5544 // Remove account "A" and change the pool size 5545 reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, "pool_size: 3", _EMPTY_)) 5546 reloadUpdateConfig(t, srvb, confB, fmt.Sprintf(confBCTemplate, "B", optsA.Cluster.Port, "pool_size: 3", _EMPTY_)) 5547 checkCluster() 5548 sendAndRecv("5") 5549 } 5550 5551 func TestConfigReloadRoutePoolAndPerAccountNoDuplicateSub(t *testing.T) { 5552 confATemplate := ` 5553 port: -1 5554 server_name: "A" 5555 accounts { 5556 A { users: [{user: "user1", password: "pwd"}] } 5557 } 5558 cluster { 5559 name: "local" 5560 listen: 127.0.0.1:-1 5561 pool_size: 3 5562 %s 5563 } 5564 ` 5565 confA := createConfFile(t, []byte(fmt.Sprintf(confATemplate, _EMPTY_))) 5566 srva, optsA := RunServerWithConfig(confA) 5567 defer srva.Shutdown() 5568 5569 confBCTemplate := ` 5570 port: -1 5571 server_name: "%s" 5572 accounts { 5573 A { users: [{user: "user1", password: "pwd"}] } 5574 } 5575 cluster { 5576 listen: 127.0.0.1:-1 5577 name: "local" 5578 routes = [ 5579 "nats://127.0.0.1:%d" 5580 ] 5581 pool_size: 3 5582 %s 5583 } 5584 ` 5585 confB := createConfFile(t, []byte(fmt.Sprintf(confBCTemplate, "B", optsA.Cluster.Port, _EMPTY_))) 5586 srvb, _ := RunServerWithConfig(confB) 5587 defer srvb.Shutdown() 5588 confC := createConfFile(t, []byte(fmt.Sprintf(confBCTemplate, "C", optsA.Cluster.Port, _EMPTY_))) 5589 srvc, _ := RunServerWithConfig(confC) 5590 defer srvc.Shutdown() 5591 5592 checkClusterFormed(t, srva, srvb, srvc) 5593 5594 ncC := natsConnect(t, srvc.ClientURL(), nats.UserInfo("user1", "pwd")) 5595 defer ncC.Close() 5596 5597 ch := make(chan struct{}) 5598 wg := sync.WaitGroup{} 5599 wg.Add(1) 5600 var subs []*nats.Subscription 5601 go func() { 5602 defer wg.Done() 5603 // Limit the number of subscriptions. From experimentation, the issue would 5604 // arise around subscriptions ~700. 5605 for i := 0; i < 1000; i++ { 5606 if sub, err := ncC.SubscribeSync(fmt.Sprintf("foo.%d", i)); err == nil { 5607 subs = append(subs, sub) 5608 } 5609 select { 5610 case <-ch: 5611 return 5612 default: 5613 if i%100 == 0 { 5614 time.Sleep(5 * time.Millisecond) 5615 } 5616 } 5617 } 5618 }() 5619 5620 // Wait a tiny bit before doing the configuration reload. 
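// The goroutine above keeps adding subscriptions while the reloads below move account
// "A" to a dedicated route; this overlap is the window in which duplicate subscriptions
// could be created on both the pooled and the per-account route.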
	time.Sleep(100 * time.Millisecond)

	reloadUpdateConfig(t, srva, confA, fmt.Sprintf(confATemplate, "accounts: [\"A\"]"))
	reloadUpdateConfig(t, srvb, confB, fmt.Sprintf(confBCTemplate, "B", optsA.Cluster.Port, "accounts: [\"A\"]"))
	reloadUpdateConfig(t, srvc, confC, fmt.Sprintf(confBCTemplate, "C", optsA.Cluster.Port, "accounts: [\"A\"]"))

	checkClusterFormed(t, srva, srvb, srvc)

	close(ch)
	wg.Wait()

	for _, sub := range subs {
		checkSubInterest(t, srvb, "A", sub.Subject, 500*time.Millisecond)
	}

	ncB := natsConnect(t, srvb.ClientURL(), nats.UserInfo("user1", "pwd"))
	defer ncB.Close()

	for _, sub := range subs {
		natsPub(t, ncB, sub.Subject, []byte("hello"))
	}

	// Now make sure that there is only 1 pending message for each sub.
	// Wait a bit to give duplicate messages a chance to arrive if there was
	// a bug that would lead to a sub on each route (the pooled and the
	// per-account one).
	time.Sleep(250 * time.Millisecond)
	for _, sub := range subs {
		if n, _, _ := sub.Pending(); n != 1 {
			t.Fatalf("Expected only 1 message for subscription on %q, got %v", sub.Subject, n)
		}
	}
}

func TestConfigReloadGlobalAccountWithMappingAndJetStream(t *testing.T) {
	tmpl := `
		listen: 127.0.0.1:-1
		server_name: %s
		jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'}

		mappings {
			subj.orig: subj.mapped.before.reload
		}

		leaf {
			listen: 127.0.0.1:-1
		}

		cluster {
			name: %s
			listen: 127.0.0.1:%d
			routes = [%s]
		}

		# For access to system account.
		accounts { $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } }
	`
	c := createJetStreamClusterWithTemplate(t, tmpl, "R3S", 3)
	defer c.shutdown()

	nc, js := jsClientConnect(t, c.randomServer())
	defer nc.Close()

	// Verify that mapping works
	checkMapping := func(expectedSubj string) {
		t.Helper()
		sub := natsSubSync(t, nc, "subj.>")
		defer sub.Unsubscribe()
		natsPub(t, nc, "subj.orig", nil)
		msg := natsNexMsg(t, sub, time.Second)
		if msg.Subject != expectedSubj {
			t.Fatalf("Expected subject to have been mapped to %q, got %q", expectedSubj, msg.Subject)
		}
	}
	checkMapping("subj.mapped.before.reload")

	// Create a stream and check that we can get the INFO
	_, err := js.AddStream(&nats.StreamConfig{
		Name:      "TEST",
		Replicas:  3,
		Subjects:  []string{"foo"},
		Retention: nats.InterestPolicy,
	})
	require_NoError(t, err)
	c.waitOnStreamLeader(globalAccountName, "TEST")

	_, err = js.StreamInfo("TEST")
	require_NoError(t, err)

	// Change mapping on all servers and issue reload
	for i, s := range c.servers {
		opts := c.opts[i]
		content, err := os.ReadFile(opts.ConfigFile)
		require_NoError(t, err)
		reloadUpdateConfig(t, s, opts.ConfigFile, strings.Replace(string(content), "subj.mapped.before.reload", "subj.mapped.after.reload", 1))
	}
	// Make sure the cluster is still formed
	checkClusterFormed(t, c.servers...)
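	// Only the mapping destination changed in the reloaded config, so the
	// JetStream assets (the "TEST" stream and its leader) are expected to
	// survive the reload untouched; the checks below verify both points.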
	// Now repeat the test for the subject mapping and stream info
	checkMapping("subj.mapped.after.reload")
	_, err = js.StreamInfo("TEST")
	require_NoError(t, err)
}

func TestConfigReloadRouteCompression(t *testing.T) {
	org := testDefaultClusterCompression
	testDefaultClusterCompression = _EMPTY_
	defer func() { testDefaultClusterCompression = org }()

	tmpl := `
		port: -1
		server_name: "%s"
		cluster {
			port: -1
			name: "local"
			%s
			%s
		}
	`
	conf1 := createConfFile(t, []byte(fmt.Sprintf(tmpl, "A", _EMPTY_, _EMPTY_)))
	s1, o1 := RunServerWithConfig(conf1)
	defer s1.Shutdown()

	routes := fmt.Sprintf("routes: [\"nats://127.0.0.1:%d\"]", o1.Cluster.Port)
	conf2 := createConfFile(t, []byte(fmt.Sprintf(tmpl, "B", routes, _EMPTY_)))
	s2, _ := RunServerWithConfig(conf2)
	defer s2.Shutdown()

	// Run a 3rd server but configure it as if it were an old server. We want
	// to make sure that reload of s1 and s2 will not affect routes from s3 to
	// s1/s2 because these do not support compression.
	conf3 := createConfFile(t, []byte(fmt.Sprintf(tmpl, "C", routes, "compression: \"not supported\"")))
	s3, _ := RunServerWithConfig(conf3)
	defer s3.Shutdown()

	checkClusterFormed(t, s1, s2, s3)

	// Collect routes' cid from servers so we can check if routes are
	// recreated when they should be and are not when they should not be.
	collect := func(s *Server) map[uint64]struct{} {
		m := make(map[uint64]struct{})
		s.mu.RLock()
		defer s.mu.RUnlock()
		s.forEachRoute(func(r *client) {
			r.mu.Lock()
			m[r.cid] = struct{}{}
			r.mu.Unlock()
		})
		return m
	}
	s1RouteIDs := collect(s1)
	s2RouteIDs := collect(s2)
	s3ID := s3.ID()

	servers := []*Server{s1, s2}
	checkCompMode := func(s1Expected, s2Expected string, shouldBeNew bool) {
		t.Helper()
		// We wait a bit to make sure that we have routes closed before
		// checking that the cluster has (re)formed.
		time.Sleep(100 * time.Millisecond)
		// First, make sure that the cluster is formed
		checkClusterFormed(t, s1, s2, s3)
		// Then check that all routes have the expected mode. We need to
		// possibly wait a bit since there is negotiation going on.
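		// For each route on s1 and s2 we verify two things: the compression
		// mode matches the expectation (routes to/from the "old" server s3
		// must always be CompressionNotSupported), and, by comparing the
		// route's cid against the previously collected set, that the route
		// was re-created only when a re-creation was expected.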
		checkFor(t, 2*time.Second, 50*time.Millisecond, func() error {
			for _, s := range servers {
				var err error
				s.mu.RLock()
				s.forEachRoute(func(r *client) {
					if err != nil {
						return
					}
					r.mu.Lock()
					var exp string
					var m map[uint64]struct{}
					if r.route.remoteID == s3ID {
						exp = CompressionNotSupported
					} else if s == s1 {
						exp = s1Expected
					} else {
						exp = s2Expected
					}
					if s == s1 {
						m = s1RouteIDs
					} else {
						m = s2RouteIDs
					}
					_, present := m[r.cid]
					cm := r.route.compression
					r.mu.Unlock()
					if cm != exp {
						err = fmt.Errorf("Expected route %v for server %s to have compression mode %q, got %q", r, s, exp, cm)
					}
					sbn := shouldBeNew
					if exp == CompressionNotSupported {
						// Override for routes to s3
						sbn = false
					}
					if sbn && present {
						err = fmt.Errorf("Expected route %v for server %s to be a new route, but it was already present", r, s)
					} else if !sbn && !present {
						err = fmt.Errorf("Expected route %v for server %s to not be new", r, s)
					}
				})
				s.mu.RUnlock()
				if err != nil {
					return err
				}
			}
			s1RouteIDs = collect(s1)
			s2RouteIDs = collect(s2)
			return nil
		})
	}
	// Since both started without any compression setting, we default to
	// "accept", which means that a server can accept/switch to compression
	// but not initiate it, so they should both be "off".
	checkCompMode(CompressionOff, CompressionOff, false)

	// Now reload s1 with "on" (s2_fast). Since s2 is *configured* with "accept",
	// they should both be CompressionS2Fast, even before we reload s2.
	reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl, "A", _EMPTY_, "compression: on"))
	checkCompMode(CompressionS2Fast, CompressionS2Fast, true)
	// Now reload s2
	reloadUpdateConfig(t, s2, conf2, fmt.Sprintf(tmpl, "B", routes, "compression: on"))
	checkCompMode(CompressionS2Fast, CompressionS2Fast, false)

	// Move on with "better"
	reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl, "A", _EMPTY_, "compression: s2_better"))
	// s1 should be at "better", but s2 still at "fast"
	checkCompMode(CompressionS2Better, CompressionS2Fast, false)
	// Now reload s2
	reloadUpdateConfig(t, s2, conf2, fmt.Sprintf(tmpl, "B", routes, "compression: s2_better"))
	checkCompMode(CompressionS2Better, CompressionS2Better, false)

	// Move to "best"
	reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl, "A", _EMPTY_, "compression: s2_best"))
	checkCompMode(CompressionS2Best, CompressionS2Better, false)
	// Now reload s2
	reloadUpdateConfig(t, s2, conf2, fmt.Sprintf(tmpl, "B", routes, "compression: s2_best"))
	checkCompMode(CompressionS2Best, CompressionS2Best, false)

	// Now turn off
	reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl, "A", _EMPTY_, "compression: off"))
	checkCompMode(CompressionOff, CompressionOff, true)
	// Now reload s2
	reloadUpdateConfig(t, s2, conf2, fmt.Sprintf(tmpl, "B", routes, "compression: off"))
	checkCompMode(CompressionOff, CompressionOff, false)

	// When "off" (and not "accept"), enabling one side is not enough; the
	// reload has to be done on both for compression to take effect.
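	// So reloading only s1 with s2_better below re-creates the routes, but
	// both sides are expected to remain "off"; only once s2 is also reloaded
	// do both switch to s2_better.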
	reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl, "A", _EMPTY_, "compression: s2_better"))
	checkCompMode(CompressionOff, CompressionOff, true)
	// Now reload s2
	reloadUpdateConfig(t, s2, conf2, fmt.Sprintf(tmpl, "B", routes, "compression: s2_better"))
	checkCompMode(CompressionS2Better, CompressionS2Better, true)

	// Try now to have different ones
	reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl, "A", _EMPTY_, "compression: s2_best"))
	// S1 should be "best" but S2 should have stayed at "better"
	checkCompMode(CompressionS2Best, CompressionS2Better, false)

	// If we remove the compression setting, it defaults to "accept", in which
	// case we want to have a negotiation and use the remote's compression
	// level. So connections should be re-created.
	reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl, "A", _EMPTY_, _EMPTY_))
	checkCompMode(CompressionS2Better, CompressionS2Better, true)

	// To avoid flapping, add a little sleep here to make sure we have things
	// settled before reloading s2.
	time.Sleep(100 * time.Millisecond)
	// And if we do the same with s2, then we will end up with no compression.
	reloadUpdateConfig(t, s2, conf2, fmt.Sprintf(tmpl, "B", routes, _EMPTY_))
	checkCompMode(CompressionOff, CompressionOff, true)
}

func TestConfigReloadRouteCompressionS2Auto(t *testing.T) {
	// This test checks s2_auto specific behavior. It makes sure that we update
	// only if the rtt_thresholds and current RTT value warrant a change, and
	// also that we actually save in c.route.compression the actual compression
	// level (not s2_auto).
	tmpl1 := `
		port: -1
		server_name: "A"
		cluster {
			port: -1
			name: "local"
			pool_size: -1
			compression: {mode: s2_auto, rtt_thresholds: [%s]}
		}
	`
	conf1 := createConfFile(t, []byte(fmt.Sprintf(tmpl1, "50ms, 100ms, 150ms")))
	s1, o1 := RunServerWithConfig(conf1)
	defer s1.Shutdown()

	conf2 := createConfFile(t, []byte(fmt.Sprintf(`
		port: -1
		server_name: "B"
		cluster {
			port: -1
			name: "local"
			pool_size: -1
			compression: s2_fast
			routes: ["nats://127.0.0.1:%d"]
		}
	`, o1.Cluster.Port)))
	s2, _ := RunServerWithConfig(conf2)
	defer s2.Shutdown()

	checkClusterFormed(t, s1, s2)

	getCompInfo := func() (string, io.Writer) {
		var cm string
		var cw io.Writer
		s1.mu.RLock()
		// There should be only 1 route...
		s1.forEachRemote(func(r *client) {
			r.mu.Lock()
			cm = r.route.compression
			cw = r.out.cw
			r.mu.Unlock()
		})
		s1.mu.RUnlock()
		return cm, cw
	}
	// Capture the compression mode and S2 writer of the route from s1 to s2.
	cm, cw := getCompInfo()

	// We do a reload, but really the mode is still s2_auto (even if the current
	// compression level may be "uncompressed", "better", etc.), so we don't
	// expect the writer to have changed.
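	// With s2_auto, the rtt_thresholds are used to pick a concrete S2 level
	// from the measured RTT (a low RTT favors a lighter level). Here we only
	// widen the thresholds, so over a loopback connection the selected level,
	// and with it the S2 writer, should remain the same.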
	reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl1, "100ms, 200ms, 300ms"))
	if ncm, ncw := getCompInfo(); ncm != cm || ncw != cw {
		t.Fatalf("Expected compression info to have stayed the same, was %q - %p, got %q - %p", cm, cw, ncm, ncw)
	}
}

func TestConfigReloadLeafNodeCompression(t *testing.T) {
	org := testDefaultLeafNodeCompression
	testDefaultLeafNodeCompression = _EMPTY_
	defer func() { testDefaultLeafNodeCompression = org }()

	tmpl1 := `
		port: -1
		server_name: "A"
		leafnodes {
			port: -1
			%s
		}
	`
	conf1 := createConfFile(t, []byte(fmt.Sprintf(tmpl1, "compression: accept")))
	s1, o1 := RunServerWithConfig(conf1)
	defer s1.Shutdown()

	port := o1.LeafNode.Port

	tmpl2 := `
		port: -1
		server_name: "%s"
		leafnodes {
			remotes [
				{
					url: "nats://127.0.0.1:%d"
					%s
				}
			]
		}
	`
	conf2 := createConfFile(t, []byte(fmt.Sprintf(tmpl2, "B", port, "compression: accept")))
	s2, _ := RunServerWithConfig(conf2)
	defer s2.Shutdown()

	// Run a 3rd server but configure it as if it were an old server. We want
	// to make sure that reload of s1 and s2 will not affect leafnodes from s3
	// to s1/s2 because these do not support compression.
	conf3 := createConfFile(t, []byte(fmt.Sprintf(tmpl2, "C", port, "compression: \"not supported\"")))
	s3, _ := RunServerWithConfig(conf3)
	defer s3.Shutdown()

	checkLeafNodeConnected(t, s2)
	checkLeafNodeConnected(t, s3)
	checkLeafNodeConnectedCount(t, s1, 2)

	// Collect leafnodes' cid from servers so we can check if connections are
	// recreated when they should be and are not when they should not be.
	collect := func(s *Server) map[uint64]struct{} {
		m := make(map[uint64]struct{})
		s.mu.RLock()
		defer s.mu.RUnlock()
		for _, l := range s.leafs {
			l.mu.Lock()
			m[l.cid] = struct{}{}
			l.mu.Unlock()
		}
		return m
	}
	s1LeafNodeIDs := collect(s1)
	s2LeafNodeIDs := collect(s2)

	servers := []*Server{s1, s2}
	checkCompMode := func(s1Expected, s2Expected string, shouldBeNew bool) {
		t.Helper()
		// We wait a bit to make sure that we have leaf connections closed
		// before checking that they are properly reconnected.
		time.Sleep(100 * time.Millisecond)
		checkLeafNodeConnected(t, s2)
		checkLeafNodeConnected(t, s3)
		checkLeafNodeConnectedCount(t, s1, 2)
		// Check that all leafnodes have the expected mode. We need to
		// possibly wait a bit since there is negotiation going on.
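		// Same idea as in the route compression test above, but over the
		// leafnode connections: each leaf must report the expected compression
		// mode (connections involving the "old" server C are always
		// CompressionNotSupported), and the cid comparison tells us whether
		// the connection was re-created only when that was expected.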
		checkFor(t, 2*time.Second, 50*time.Millisecond, func() error {
			for _, s := range servers {
				var err error
				s.mu.RLock()
				for _, l := range s.leafs {
					l.mu.Lock()
					var exp string
					var m map[uint64]struct{}
					if l.leaf.remoteServer == "C" {
						exp = CompressionNotSupported
					} else if s == s1 {
						exp = s1Expected
					} else {
						exp = s2Expected
					}
					if s == s1 {
						m = s1LeafNodeIDs
					} else {
						m = s2LeafNodeIDs
					}
					_, present := m[l.cid]
					cm := l.leaf.compression
					l.mu.Unlock()
					if cm != exp {
						err = fmt.Errorf("Expected leaf %v for server %s to have compression mode %q, got %q", l, s, exp, cm)
					}
					sbn := shouldBeNew
					if exp == CompressionNotSupported {
						// Override for leafnodes from s3
						sbn = false
					}
					if sbn && present {
						err = fmt.Errorf("Expected leaf %v for server %s to be a new leaf, but it was already present", l, s)
					} else if !sbn && !present {
						err = fmt.Errorf("Expected leaf %v for server %s to not be new", l, s)
					}
					if err != nil {
						break
					}
				}
				s.mu.RUnlock()
				if err != nil {
					return err
				}
			}
			s1LeafNodeIDs = collect(s1)
			s2LeafNodeIDs = collect(s2)
			return nil
		})
	}
	// Since both started with compression "accept", they should both be set to "off".
	checkCompMode(CompressionOff, CompressionOff, false)

	// Now reload s1 with "on" (s2_auto). Since s2 is *configured* with "accept",
	// s1 should be "uncompressed" (due to low RTT), and s2 is in that case set
	// to s2_fast.
	reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl1, "compression: on"))
	checkCompMode(CompressionS2Uncompressed, CompressionS2Fast, true)
	// Now reload s2
	reloadUpdateConfig(t, s2, conf2, fmt.Sprintf(tmpl2, "B", port, "compression: on"))
	checkCompMode(CompressionS2Uncompressed, CompressionS2Uncompressed, false)

	// Move on with "better"
	reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl1, "compression: s2_better"))
	// s1 should be at "better", but s2 still at "uncompressed"
	checkCompMode(CompressionS2Better, CompressionS2Uncompressed, false)
	// Now reload s2
	reloadUpdateConfig(t, s2, conf2, fmt.Sprintf(tmpl2, "B", port, "compression: s2_better"))
	checkCompMode(CompressionS2Better, CompressionS2Better, false)

	// Move to "best"
	reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl1, "compression: s2_best"))
	checkCompMode(CompressionS2Best, CompressionS2Better, false)
	// Now reload s2
	reloadUpdateConfig(t, s2, conf2, fmt.Sprintf(tmpl2, "B", port, "compression: s2_best"))
	checkCompMode(CompressionS2Best, CompressionS2Best, false)

	// Now turn off
	reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl1, "compression: off"))
	checkCompMode(CompressionOff, CompressionOff, true)
	// Now reload s2
	reloadUpdateConfig(t, s2, conf2, fmt.Sprintf(tmpl2, "B", port, "compression: off"))
	checkCompMode(CompressionOff, CompressionOff, false)

	// When "off" (and not "accept"), enabling one side is not enough; the
	// reload has to be done on both for compression to take effect.
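	// As with routes: reloading only s1 below re-creates the leaf connections,
	// but both sides stay "off"; compression kicks in only once s2 is reloaded
	// as well.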
	reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl1, "compression: s2_better"))
	checkCompMode(CompressionOff, CompressionOff, true)
	// Now reload s2
	reloadUpdateConfig(t, s2, conf2, fmt.Sprintf(tmpl2, "B", port, "compression: s2_better"))
	checkCompMode(CompressionS2Better, CompressionS2Better, true)

	// Try now to have different ones
	reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl1, "compression: s2_best"))
	// S1 should be "best" but S2 should have stayed at "better"
	checkCompMode(CompressionS2Best, CompressionS2Better, false)

	// Change the setting to "accept", in which case we want to have a
	// negotiation and use the remote's compression level. So connections
	// should be re-created.
	reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl1, "compression: accept"))
	checkCompMode(CompressionS2Better, CompressionS2Better, true)

	// To avoid flapping, add a little sleep here to make sure we have things
	// settled before reloading s2.
	time.Sleep(100 * time.Millisecond)
	// And if we do the same with s2, then we will end up with no compression.
	reloadUpdateConfig(t, s2, conf2, fmt.Sprintf(tmpl2, "B", port, "compression: accept"))
	checkCompMode(CompressionOff, CompressionOff, true)

	// Now remove the setting completely and we should default to s2_auto,
	// which means that s1 should be at "uncompressed" and s2 at "fast".
	reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl1, _EMPTY_))
	checkCompMode(CompressionS2Uncompressed, CompressionS2Fast, true)

	// Now with s2, both will be "uncompressed"
	reloadUpdateConfig(t, s2, conf2, fmt.Sprintf(tmpl2, "B", port, _EMPTY_))
	checkCompMode(CompressionS2Uncompressed, CompressionS2Uncompressed, false)
}

func TestConfigReloadLeafNodeCompressionS2Auto(t *testing.T) {
	// This test checks s2_auto specific behavior. It makes sure that we update
	// only if the rtt_thresholds and current RTT value warrant a change, and
	// also that we actually save in c.leaf.compression the actual compression
	// level (not s2_auto).
	tmpl1 := `
		port: -1
		server_name: "A"
		leafnodes {
			port: -1
			compression: {mode: s2_auto, rtt_thresholds: [%s]}
		}
	`
	conf1 := createConfFile(t, []byte(fmt.Sprintf(tmpl1, "50ms, 100ms, 150ms")))
	s1, o1 := RunServerWithConfig(conf1)
	defer s1.Shutdown()

	conf2 := createConfFile(t, []byte(fmt.Sprintf(`
		port: -1
		server_name: "B"
		leafnodes {
			remotes [{ url: "nats://127.0.0.1:%d", compression: s2_fast}]
		}
	`, o1.LeafNode.Port)))
	s2, _ := RunServerWithConfig(conf2)
	defer s2.Shutdown()

	checkLeafNodeConnected(t, s2)

	getCompInfo := func() (string, io.Writer) {
		var cm string
		var cw io.Writer
		s1.mu.RLock()
		// There should be only 1 leaf...
		for _, l := range s1.leafs {
			l.mu.Lock()
			cm = l.leaf.compression
			cw = l.out.cw
			l.mu.Unlock()
		}
		s1.mu.RUnlock()
		return cm, cw
	}
	// Capture the compression mode and S2 writer of the leaf connection from s1 to s2.
	cm, cw := getCompInfo()

	// We do a reload, but really the mode is still s2_auto (even if the current
	// compression level may be "uncompressed", "better", etc.), so we don't
	// expect the writer to have changed.
	reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl1, "100ms, 200ms, 300ms"))
	if ncm, ncw := getCompInfo(); ncm != cm || ncw != cw {
		t.Fatalf("Expected compression info to have stayed the same, was %q - %p, got %q - %p", cm, cw, ncm, ncw)
	}
}

func TestConfigReloadNoPanicOnShutdown(t *testing.T) {
	tmpl := `
		port: -1
		jetstream: true
		accounts {
			A {
				users: [{user: A, password: pwd}]
				%s
			}
			B {
				users: [{user: B, password: pwd}]
			}
		}
	`
	for i := 0; i < 50; i++ {
		conf := createConfFile(t, []byte(fmt.Sprintf(tmpl, _EMPTY_)))
		s, _ := RunServerWithConfig(conf)
		// Don't use a defer s.Shutdown() here since it would prevent the panic
		// from being reported (the test would still fail, but because of a
		// runtime timeout).

		err := os.WriteFile(conf, []byte(fmt.Sprintf(tmpl, "jetstream: true")), 0666)
		require_NoError(t, err)

		wg := sync.WaitGroup{}
		wg.Add(1)
		go func() {
			defer wg.Done()
			time.Sleep(10 * time.Millisecond)
			s.Shutdown()
		}()

		time.Sleep(8 * time.Millisecond)
		err = s.Reload()
		require_NoError(t, err)
		wg.Wait()
	}
}