github.com/nats-io/nats-server/v2@v2.11.0-preview.2/server/msgtrace_test.go (about) 1 // Copyright 2024 The NATS Authors 2 // Licensed under the Apache License, Version 2.0 (the "License"); 3 // you may not use this file except in compliance with the License. 4 // You may obtain a copy of the License at 5 // 6 // http://www.apache.org/licenses/LICENSE-2.0 7 // 8 // Unless required by applicable law or agreed to in writing, software 9 // distributed under the License is distributed on an "AS IS" BASIS, 10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 // See the License for the specific language governing permissions and 12 // limitations under the License. 13 14 //go:build !skip_msgtrace_tests 15 // +build !skip_msgtrace_tests 16 17 package server 18 19 import ( 20 "bytes" 21 "compress/gzip" 22 "encoding/json" 23 "fmt" 24 "io" 25 "net" 26 "strings" 27 "sync/atomic" 28 "testing" 29 "time" 30 31 "github.com/klauspost/compress/s2" 32 "github.com/nats-io/jwt/v2" 33 "github.com/nats-io/nats.go" 34 "github.com/nats-io/nkeys" 35 ) 36 37 func init() { 38 msgTraceRunInTests = true 39 } 40 41 func TestMsgTraceConnName(t *testing.T) { 42 c := &client{kind: ROUTER, route: &route{remoteName: "somename"}} 43 c.opts.Name = "someid" 44 45 // If route.remoteName is set, it will take precedence. 46 val := getConnName(c) 47 require_Equal[string](t, val, "somename") 48 // When not set, we revert to c.opts.Name 49 c.route.remoteName = _EMPTY_ 50 val = getConnName(c) 51 require_Equal[string](t, val, "someid") 52 53 // Now same for GW. 54 c.route = nil 55 c.gw = &gateway{remoteName: "somename"} 56 c.kind = GATEWAY 57 val = getConnName(c) 58 require_Equal[string](t, val, "somename") 59 // Revert to c.opts.Name 60 c.gw.remoteName = _EMPTY_ 61 val = getConnName(c) 62 require_Equal[string](t, val, "someid") 63 64 // For LeafNode now 65 c.gw = nil 66 c.leaf = &leaf{remoteServer: "somename"} 67 c.kind = LEAF 68 val = getConnName(c) 69 require_Equal[string](t, val, "somename") 70 // But if not set... 
71 c.leaf.remoteServer = _EMPTY_ 72 val = getConnName(c) 73 require_Equal[string](t, val, "someid") 74 75 c.leaf = nil 76 c.kind = CLIENT 77 val = getConnName(c) 78 require_Equal[string](t, val, "someid") 79 } 80 81 func TestMsgTraceGenHeaderMap(t *testing.T) { 82 for _, test := range []struct { 83 name string 84 header []byte 85 expected map[string][]string 86 external bool 87 }{ 88 {"missing header line", []byte("Nats-Trace-Dest: val\r\n"), nil, false}, 89 {"no trace header present", []byte(hdrLine + "Header1: val1\r\nHeader2: val2\r\n"), nil, false}, 90 {"trace header with some prefix", []byte(hdrLine + "Some-Prefix-" + MsgTraceDest + ": some value\r\n"), nil, false}, 91 {"trace header with some suffix", []byte(hdrLine + MsgTraceDest + "-Some-Suffix: some value\r\n"), nil, false}, 92 {"trace header with space before colon", []byte(hdrLine + MsgTraceDest + " : some value\r\n"), nil, false}, 93 {"trace header with missing cr_lf for value", []byte(hdrLine + MsgTraceDest + " : bogus"), nil, false}, 94 {"external trace header with some prefix", []byte(hdrLine + "Some-Prefix-" + traceParentHdr + ": 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01\r\n"), nil, false}, 95 {"external trace header with some suffix", []byte(hdrLine + traceParentHdr + "-Some-Suffix: 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01\r\n"), nil, false}, 96 {"external header with space before colon", []byte(hdrLine + traceParentHdr + " : 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01\r\n"), nil, false}, 97 {"external header with missing cr_lf for value", []byte(hdrLine + traceParentHdr + " : 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"), nil, false}, 98 {"trace header first", []byte(hdrLine + MsgTraceDest + ": some.dest\r\nSome-Header: some value\r\n"), 99 map[string][]string{"Some-Header": {"some value"}, MsgTraceDest: {"some.dest"}}, false}, 100 {"trace header last", []byte(hdrLine + "Some-Header: some value\r\n" + MsgTraceDest + ": some.dest\r\n"), 101 map[string][]string{"Some-Header": {"some value"}, MsgTraceDest: {"some.dest"}}, false}, 102 {"trace header multiple values", []byte(hdrLine + MsgTraceDest + ": some.dest\r\nSome-Header: some value\r\n" + MsgTraceDest + ": some.dest.2"), 103 map[string][]string{"Some-Header": {"some value"}, MsgTraceDest: {"some.dest", "some.dest.2"}}, false}, 104 {"trace header and some empty key", []byte(hdrLine + MsgTraceDest + ": some.dest\r\n: bogus\r\nSome-Header: some value\r\n"), 105 map[string][]string{"Some-Header": {"some value"}, MsgTraceDest: {"some.dest"}}, false}, 106 {"trace header and some header missing cr_lf for value", []byte(hdrLine + MsgTraceDest + ": some.dest\r\nSome-Header: bogus"), 107 map[string][]string{MsgTraceDest: {"some.dest"}}, false}, 108 {"trace header and external after", []byte(hdrLine + MsgTraceDest + ": some.dest\r\n" + traceParentHdr + ": 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01\r\nSome-Header: some value\r\n"), 109 map[string][]string{"Some-Header": {"some value"}, MsgTraceDest: {"some.dest"}, traceParentHdr: {"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"}}, false}, 110 {"trace header and external before", []byte(hdrLine + traceParentHdr + ": 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01\r\n" + MsgTraceDest + ": some.dest\r\nSome-Header: some value\r\n"), 111 map[string][]string{"Some-Header": {"some value"}, MsgTraceDest: {"some.dest"}, traceParentHdr: {"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"}}, false}, 112 {"external malformed", []byte(hdrLine + 
traceParentHdr + ": 00-4bf92f3577b34da6a3ce929d0e0e4736-01\r\n"), nil, false}, 113 {"external first and sampling", []byte(hdrLine + traceParentHdr + ": 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01\r\nSome-Header: some value\r\n"), 114 map[string][]string{"Some-Header": {"some value"}, traceParentHdr: {"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"}}, true}, 115 {"external middle and sampling", []byte(hdrLine + "Some-Header: some value1\r\n" + traceParentHdr + ": 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01\r\nSome-Header: some value2\r\n"), 116 map[string][]string{"Some-Header": {"some value1", "some value2"}, traceParentHdr: {"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"}}, true}, 117 {"external last and sampling", []byte(hdrLine + "Some-Header: some value\r\n" + traceParentHdr + ": 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01\r\n"), 118 map[string][]string{"Some-Header": {"some value"}, traceParentHdr: {"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"}}, true}, 119 {"external sampling with not just 01", []byte(hdrLine + traceParentHdr + ": 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-27\r\nSome-Header: some value\r\n"), 120 map[string][]string{"Some-Header": {"some value"}, traceParentHdr: {"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-27"}}, true}, 121 {"external with different case and sampling", []byte(hdrLine + "TrAcEpArEnT: 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01\r\nSome-Header: some value\r\n"), 122 map[string][]string{"Some-Header": {"some value"}, traceParentHdr: {"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"}}, true}, 123 {"external first and not sampling", []byte(hdrLine + traceParentHdr + ": 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00\r\nSome-Header: some value\r\n"), nil, false}, 124 {"external middle and not sampling", []byte(hdrLine + "Some-Header: some value1\r\n" + traceParentHdr + ": 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00\r\nSome-Header: some value2\r\n"), nil, false}, 125 {"external last and not sampling", []byte(hdrLine + "Some-Header: some value\r\n" + traceParentHdr + ": 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00\r\n"), nil, false}, 126 {"external not sampling with not just 00", []byte(hdrLine + traceParentHdr + ": 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-22\r\nSome-Header: some value\r\n"), nil, false}, 127 } { 128 t.Run(test.name, func(t *testing.T) { 129 m, ext := genHeaderMapIfTraceHeadersPresent(test.header) 130 if test.external != ext { 131 t.Fatalf("Expected external to be %v, got %v", test.external, ext) 132 } 133 if len(test.expected) != len(m) { 134 t.Fatalf("Expected map to be of size %v, got %v", len(test.expected), len(m)) 135 } 136 // If external, we should find traceParentHdr 137 if test.external { 138 if _, ok := m[traceParentHdr]; !ok { 139 t.Fatalf("Expected traceparent header to be present, it was not: %+v", m) 140 } 141 // Header should have been rewritten, so we should find it in original header. 
142 if !bytes.Contains(test.header, []byte(traceParentHdr)) { 143 t.Fatalf("Header should have been rewritten to have the traceparent in lower case: %s", test.header) 144 } 145 } 146 for k, vv := range m { 147 evv, ok := test.expected[k] 148 if !ok { 149 t.Fatalf("Did not find header %q in resulting map: %+v", k, m) 150 } 151 for i, v := range vv { 152 if evv[i] != v { 153 t.Fatalf("Expected value %v of key %q to be %q, got %q", i, k, evv[i], v) 154 } 155 } 156 } 157 }) 158 } 159 } 160 161 func TestMsgTraceBasic(t *testing.T) { 162 conf := createConfFile(t, []byte(` 163 listen: 127.0.0.1:-1 164 mappings = { 165 foo: bar 166 } 167 `)) 168 s, _ := RunServerWithConfig(conf) 169 defer s.Shutdown() 170 171 nc := natsConnect(t, s.ClientURL()) 172 defer nc.Close() 173 cid, err := nc.GetClientID() 174 require_NoError(t, err) 175 176 traceSub := natsSubSync(t, nc, "my.trace.subj") 177 natsFlush(t, nc) 178 179 // Send trace message to a dummy subject to check that resulting trace's 180 // SubjectMapping and Egress are nil. 181 msg := nats.NewMsg("dummy") 182 msg.Header.Set(MsgTraceDest, traceSub.Subject) 183 msg.Header.Set(MsgTraceOnly, "true") 184 msg.Data = []byte("hello!") 185 err = nc.PublishMsg(msg) 186 require_NoError(t, err) 187 188 traceMsg := natsNexMsg(t, traceSub, time.Second) 189 var e MsgTraceEvent 190 json.Unmarshal(traceMsg.Data, &e) 191 require_Equal[string](t, e.Server.Name, s.Name()) 192 // We don't remove the headers, so we will find the tracing header there. 193 require_True(t, e.Request.Header != nil) 194 require_Equal[int](t, len(e.Request.Header), 2) 195 // The message size is 6 + whatever size for the 2 trace headers. 196 // Let's just make sure that size is > 20... 197 require_True(t, e.Request.MsgSize > 20) 198 ingress := e.Ingress() 199 require_True(t, ingress != nil) 200 require_True(t, ingress.Kind == CLIENT) 201 require_True(t, ingress.Timestamp != time.Time{}) 202 require_Equal[uint64](t, ingress.CID, cid) 203 require_Equal[string](t, ingress.Name, _EMPTY_) 204 require_Equal[string](t, ingress.Account, globalAccountName) 205 require_Equal[string](t, ingress.Subject, "dummy") 206 require_Equal[string](t, ingress.Error, _EMPTY_) 207 require_True(t, e.SubjectMapping() == nil) 208 require_True(t, e.StreamExports() == nil) 209 require_True(t, e.ServiceImports() == nil) 210 require_True(t, e.JetStream() == nil) 211 require_True(t, e.Egresses() == nil) 212 213 // Now setup subscriptions that generate interest on the subject. 
214 nc2 := natsConnect(t, s.ClientURL(), nats.Name("sub1And2")) 215 defer nc2.Close() 216 sub1 := natsSubSync(t, nc2, "bar") 217 sub2 := natsSubSync(t, nc2, "bar") 218 natsFlush(t, nc2) 219 nc2CID, _ := nc2.GetClientID() 220 221 nc3 := natsConnect(t, s.ClientURL()) 222 defer nc3.Close() 223 sub3 := natsSubSync(t, nc3, "*") 224 natsFlush(t, nc3) 225 226 for _, test := range []struct { 227 name string 228 deliverMsg bool 229 }{ 230 {"just trace", false}, 231 {"deliver msg", true}, 232 } { 233 t.Run(test.name, func(t *testing.T) { 234 msg = nats.NewMsg("foo") 235 msg.Header.Set("Some-App-Header", "some value") 236 msg.Header.Set(MsgTraceDest, traceSub.Subject) 237 if !test.deliverMsg { 238 msg.Header.Set(MsgTraceOnly, "true") 239 } 240 msg.Data = []byte("hello!") 241 err = nc.PublishMsg(msg) 242 require_NoError(t, err) 243 244 checkAppMsg := func(sub *nats.Subscription, expected bool) { 245 if expected { 246 appMsg := natsNexMsg(t, sub, time.Second) 247 require_Equal[string](t, string(appMsg.Data), "hello!") 248 // We don't remove message trace header, so we should have 249 // 2 headers (the app + trace destination) 250 require_True(t, len(appMsg.Header) == 2) 251 require_Equal[string](t, appMsg.Header.Get("Some-App-Header"), "some value") 252 require_Equal[string](t, appMsg.Header.Get(MsgTraceDest), traceSub.Subject) 253 } 254 // Check that no (more) messages are received. 255 if msg, err := sub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 256 t.Fatalf("Did not expect application message, got %s", msg.Data) 257 } 258 } 259 for _, sub := range []*nats.Subscription{sub1, sub2, sub3} { 260 checkAppMsg(sub, test.deliverMsg) 261 } 262 263 traceMsg = natsNexMsg(t, traceSub, time.Second) 264 e = MsgTraceEvent{} 265 json.Unmarshal(traceMsg.Data, &e) 266 require_Equal[string](t, e.Server.Name, s.Name()) 267 require_True(t, e.Request.Header != nil) 268 // We should have the app header and the trace header(s) too. 269 expected := 2 270 if !test.deliverMsg { 271 // The "trace-only" header is added. 272 expected++ 273 } 274 require_Equal[int](t, len(e.Request.Header), expected) 275 require_Equal[string](t, e.Request.Header["Some-App-Header"][0], "some value") 276 // The message size is 6 + whatever size for the 2 trace headers. 277 // Let's just make sure that size is > 20... 
278 require_True(t, e.Request.MsgSize > 20) 279 ingress := e.Ingress() 280 require_True(t, ingress.Kind == CLIENT) 281 require_True(t, ingress.Timestamp != time.Time{}) 282 require_Equal[string](t, ingress.Account, globalAccountName) 283 require_Equal[string](t, ingress.Subject, "foo") 284 sm := e.SubjectMapping() 285 require_True(t, sm != nil) 286 require_True(t, sm.Timestamp != time.Time{}) 287 require_Equal[string](t, sm.MappedTo, "bar") 288 egress := e.Egresses() 289 require_Equal[int](t, len(egress), 3) 290 var sub1And2 int 291 for _, eg := range egress { 292 // All Egress should be clients 293 require_True(t, eg.Kind == CLIENT) 294 require_True(t, eg.Timestamp != time.Time{}) 295 // For nc2CID, we should have two egress 296 if eg.CID == nc2CID { 297 // Check name 298 require_Equal[string](t, eg.Name, "sub1And2") 299 require_Equal[string](t, eg.Subscription, "bar") 300 sub1And2++ 301 } else { 302 // No name set 303 require_Equal[string](t, eg.Name, _EMPTY_) 304 require_Equal[string](t, eg.Subscription, "*") 305 } 306 } 307 require_Equal[int](t, sub1And2, 2) 308 }) 309 } 310 } 311 312 func TestMsgTraceIngressMaxPayloadError(t *testing.T) { 313 o := DefaultOptions() 314 o.MaxPayload = 1024 315 s := RunServer(o) 316 defer s.Shutdown() 317 318 nc := natsConnect(t, s.ClientURL()) 319 defer nc.Close() 320 321 traceSub := natsSubSync(t, nc, "my.trace.subj") 322 natsSub(t, nc, "foo", func(_ *nats.Msg) {}) 323 natsFlush(t, nc) 324 325 for _, test := range []struct { 326 name string 327 deliverMsg bool 328 }{ 329 {"just trace", false}, 330 {"deliver msg", true}, 331 } { 332 t.Run(test.name, func(t *testing.T) { 333 nc2, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", o.Port)) 334 require_NoError(t, err) 335 defer nc2.Close() 336 337 nc2.Write([]byte("CONNECT {\"protocol\":1,\"headers\":true,\"no_responders\":true}\r\n")) 338 339 var traceOnlyHdr string 340 if !test.deliverMsg { 341 traceOnlyHdr = fmt.Sprintf("%s:true\r\n", MsgTraceOnly) 342 } 343 hdr := fmt.Sprintf("%s%s:%s\r\n%s\r\n", hdrLine, MsgTraceDest, traceSub.Subject, traceOnlyHdr) 344 hPub := fmt.Sprintf("HPUB foo %d 2048\r\n%sAAAAAAAAAAAAAAAAAA...", len(hdr), hdr) 345 nc2.Write([]byte(hPub)) 346 347 traceMsg := natsNexMsg(t, traceSub, time.Second) 348 var e MsgTraceEvent 349 json.Unmarshal(traceMsg.Data, &e) 350 require_Equal[string](t, e.Server.Name, s.Name()) 351 require_True(t, e.Request.Header == nil) 352 require_True(t, e.Ingress() != nil) 353 require_Contains(t, e.Ingress().Error, ErrMaxPayload.Error()) 354 require_True(t, e.Egresses() == nil) 355 }) 356 } 357 } 358 359 func TestMsgTraceIngressErrors(t *testing.T) { 360 conf := createConfFile(t, []byte(` 361 port: -1 362 accounts { 363 A { 364 users: [ 365 { 366 user: a 367 password: pwd 368 permissions { 369 subscribe: ["my.trace.subj", "foo"] 370 publish { 371 allow: ["foo", "bar.>"] 372 deny: ["bar.baz"] 373 } 374 } 375 } 376 ] 377 } 378 } 379 `)) 380 s, _ := RunServerWithConfig(conf) 381 defer s.Shutdown() 382 383 nc := natsConnect(t, s.ClientURL(), nats.UserInfo("a", "pwd")) 384 defer nc.Close() 385 386 traceSub := natsSubSync(t, nc, "my.trace.subj") 387 natsSub(t, nc, "foo", func(_ *nats.Msg) {}) 388 natsFlush(t, nc) 389 390 for _, test := range []struct { 391 name string 392 deliverMsg bool 393 }{ 394 {"just trace", false}, 395 {"deliver msg", true}, 396 } { 397 t.Run(test.name, func(t *testing.T) { 398 nc2 := natsConnect(t, s.ClientURL(), nats.UserInfo("a", "pwd")) 399 defer nc2.Close() 400 401 sendMsg := func(subj, reply, errTxt string) { 402 msg := nats.NewMsg(subj) 
403 msg.Header.Set(MsgTraceDest, traceSub.Subject) 404 if !test.deliverMsg { 405 msg.Header.Set(MsgTraceOnly, "true") 406 } 407 msg.Reply = reply 408 msg.Data = []byte("hello") 409 nc2.PublishMsg(msg) 410 411 traceMsg := natsNexMsg(t, traceSub, time.Second) 412 var e MsgTraceEvent 413 json.Unmarshal(traceMsg.Data, &e) 414 require_Equal[string](t, e.Server.Name, s.Name()) 415 require_True(t, e.Request.Header != nil) 416 require_Contains(t, e.Ingress().Error, errTxt) 417 require_True(t, e.Egresses() == nil) 418 } 419 420 // Send to a subject that causes permission violation 421 sendMsg("bar.baz", _EMPTY_, "Permissions Violation for Publish to") 422 423 // Send to a subject that is reserved for GW replies 424 sendMsg(gwReplyPrefix+"foo", _EMPTY_, "Permissions Violation for Publish to") 425 426 // Send with a Reply that is reserved 427 sendMsg("foo", replyPrefix+"bar", "Permissions Violation for Publish with Reply of") 428 }) 429 } 430 } 431 432 func TestMsgTraceEgressErrors(t *testing.T) { 433 conf := createConfFile(t, []byte(` 434 port: -1 435 accounts { 436 A { 437 users: [ 438 { 439 user: a 440 password: pwd 441 permissions { 442 subscribe: { 443 allow: ["my.trace.subj", "foo", "bar.>"] 444 deny: "bar.bat" 445 } 446 publish { 447 allow: ["foo", "bar.>"] 448 deny: ["bar.baz"] 449 } 450 } 451 } 452 ] 453 } 454 } 455 `)) 456 s, _ := RunServerWithConfig(conf) 457 defer s.Shutdown() 458 459 nc := natsConnect(t, s.ClientURL(), nats.UserInfo("a", "pwd")) 460 defer nc.Close() 461 462 traceSub := natsSubSync(t, nc, "my.trace.subj") 463 natsFlush(t, nc) 464 465 for _, test := range []struct { 466 name string 467 deliverMsg bool 468 }{ 469 {"just trace", false}, 470 {"deliver msg", true}, 471 } { 472 t.Run(test.name, func(t *testing.T) { 473 sendMsg := func(pubc *nats.Conn, subj, errTxt string) { 474 t.Helper() 475 476 msg := nats.NewMsg(subj) 477 msg.Header.Set(MsgTraceDest, traceSub.Subject) 478 if !test.deliverMsg { 479 msg.Header.Set(MsgTraceOnly, "true") 480 } 481 msg.Data = []byte("hello") 482 pubc.PublishMsg(msg) 483 484 traceMsg := natsNexMsg(t, traceSub, time.Second) 485 var e MsgTraceEvent 486 json.Unmarshal(traceMsg.Data, &e) 487 require_Equal[string](t, e.Server.Name, s.Name()) 488 egress := e.Egresses() 489 require_Equal[int](t, len(egress), 1) 490 require_Contains(t, egress[0].Error, errTxt) 491 } 492 493 // Test no-echo. 494 nc2 := natsConnect(t, s.ClientURL(), nats.UserInfo("a", "pwd"), nats.NoEcho()) 495 defer nc2.Close() 496 natsSubSync(t, nc2, "foo") 497 sendMsg(nc2, "foo", errMsgTraceNoEcho) 498 nc2.Close() 499 500 // Test deny sub. 501 nc2 = natsConnect(t, s.ClientURL(), nats.UserInfo("a", "pwd")) 502 defer nc2.Close() 503 natsSubSync(t, nc2, "bar.>") 504 sendMsg(nc2, "bar.bat", errMsgTraceSubDeny) 505 nc2.Close() 506 507 // Test sub closed 508 nc2 = natsConnect(t, s.ClientURL(), nats.UserInfo("a", "pwd")) 509 defer nc2.Close() 510 natsSubSync(t, nc2, "bar.>") 511 natsFlush(t, nc2) 512 // Aritifially change the closed status of the subscription 513 cid, err := nc2.GetClientID() 514 require_NoError(t, err) 515 c := s.GetClient(cid) 516 c.mu.Lock() 517 for _, sub := range c.subs { 518 if string(sub.subject) == "bar.>" { 519 sub.close() 520 } 521 } 522 c.mu.Unlock() 523 sendMsg(nc2, "bar.bar", errMsgTraceSubClosed) 524 nc2.Close() 525 526 // The following applies only when doing delivery. 
527 if test.deliverMsg { 528 // Test auto-unsub exceeded 529 nc2 = natsConnect(t, s.ClientURL(), nats.UserInfo("a", "pwd")) 530 defer nc2.Close() 531 sub := natsSubSync(t, nc2, "bar.>") 532 err := sub.AutoUnsubscribe(10) 533 require_NoError(t, err) 534 natsFlush(t, nc2) 535 536 // Modify sub.nm to be already over the 10 limit 537 cid, err := nc2.GetClientID() 538 require_NoError(t, err) 539 c := s.GetClient(cid) 540 c.mu.Lock() 541 for _, sub := range c.subs { 542 if string(sub.subject) == "bar.>" { 543 sub.nm = 20 544 } 545 } 546 c.mu.Unlock() 547 548 sendMsg(nc2, "bar.bar", errMsgTraceAutoSubExceeded) 549 nc2.Close() 550 551 // Test client closed 552 nc2 = natsConnect(t, s.ClientURL(), nats.UserInfo("a", "pwd")) 553 defer nc2.Close() 554 natsSubSync(t, nc2, "bar.>") 555 cid, err = nc2.GetClientID() 556 require_NoError(t, err) 557 c = s.GetClient(cid) 558 c.mu.Lock() 559 c.out.stc = make(chan struct{}) 560 c.mu.Unlock() 561 msg := nats.NewMsg("bar.bar") 562 msg.Header.Set(MsgTraceDest, traceSub.Subject) 563 if !test.deliverMsg { 564 msg.Header.Set(MsgTraceOnly, "true") 565 } 566 msg.Data = []byte("hello") 567 nc2.PublishMsg(msg) 568 time.Sleep(10 * time.Millisecond) 569 cid, err = nc2.GetClientID() 570 require_NoError(t, err) 571 c = s.GetClient(cid) 572 c.mu.Lock() 573 c.flags.set(closeConnection) 574 c.mu.Unlock() 575 576 traceMsg := natsNexMsg(t, traceSub, time.Second) 577 var e MsgTraceEvent 578 json.Unmarshal(traceMsg.Data, &e) 579 require_Equal[string](t, e.Server.Name, s.Name()) 580 egress := e.Egresses() 581 require_Equal[int](t, len(egress), 1) 582 require_Contains(t, egress[0].Error, errMsgTraceClientClosed) 583 c.mu.Lock() 584 c.flags.clear(closeConnection) 585 c.mu.Unlock() 586 nc2.Close() 587 } 588 }) 589 } 590 } 591 592 func TestMsgTraceWithQueueSub(t *testing.T) { 593 o := DefaultOptions() 594 s := RunServer(o) 595 defer s.Shutdown() 596 597 nc := natsConnect(t, s.ClientURL()) 598 defer nc.Close() 599 600 traceSub := natsSubSync(t, nc, "my.trace.subj") 601 natsFlush(t, nc) 602 603 nc2 := natsConnect(t, s.ClientURL(), nats.Name("sub1")) 604 defer nc2.Close() 605 sub1 := natsQueueSubSync(t, nc2, "foo", "bar") 606 natsFlush(t, nc2) 607 608 nc3 := natsConnect(t, s.ClientURL(), nats.Name("sub2")) 609 defer nc3.Close() 610 sub2 := natsQueueSubSync(t, nc3, "foo", "bar") 611 sub3 := natsQueueSubSync(t, nc3, "*", "baz") 612 natsFlush(t, nc3) 613 614 for _, test := range []struct { 615 name string 616 deliverMsg bool 617 }{ 618 {"just trace", false}, 619 {"deliver msg", true}, 620 } { 621 t.Run(test.name, func(t *testing.T) { 622 msg := nats.NewMsg("foo") 623 msg.Header.Set(MsgTraceDest, traceSub.Subject) 624 if !test.deliverMsg { 625 msg.Header.Set(MsgTraceOnly, "true") 626 } 627 if !test.deliverMsg { 628 msg.Data = []byte("hello1") 629 } else { 630 msg.Data = []byte("hello2") 631 } 632 err := nc.PublishMsg(msg) 633 require_NoError(t, err) 634 635 if test.deliverMsg { 636 // Only one should have got the message... 
637 msg1, err1 := sub1.NextMsg(100 * time.Millisecond) 638 msg2, err2 := sub2.NextMsg(100 * time.Millisecond) 639 if err1 == nil && err2 == nil { 640 t.Fatalf("Only one message should have been received") 641 } 642 var val string 643 if msg1 != nil { 644 val = string(msg1.Data) 645 } else { 646 val = string(msg2.Data) 647 } 648 require_Equal[string](t, val, "hello2") 649 // Queue baz should also have received the message 650 msg := natsNexMsg(t, sub3, time.Second) 651 require_Equal[string](t, string(msg.Data), "hello2") 652 } 653 // Check that no (more) messages are received. 654 for _, sub := range []*nats.Subscription{sub1, sub2, sub3} { 655 if msg, err := sub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 656 t.Fatalf("Expected no message, got %s", msg.Data) 657 } 658 } 659 660 traceMsg := natsNexMsg(t, traceSub, time.Second) 661 var e MsgTraceEvent 662 json.Unmarshal(traceMsg.Data, &e) 663 require_Equal[string](t, e.Server.Name, s.Name()) 664 ingress := e.Ingress() 665 require_True(t, ingress != nil) 666 require_True(t, ingress.Kind == CLIENT) 667 require_Equal[string](t, ingress.Subject, "foo") 668 egress := e.Egresses() 669 require_Equal[int](t, len(egress), 2) 670 var qbar, qbaz int 671 for _, eg := range egress { 672 switch eg.Queue { 673 case "bar": 674 require_Equal[string](t, eg.Subscription, "foo") 675 qbar++ 676 case "baz": 677 require_Equal[string](t, eg.Subscription, "*") 678 qbaz++ 679 default: 680 t.Fatalf("Wrong queue name: %q", eg.Queue) 681 } 682 } 683 require_Equal[int](t, qbar, 1) 684 require_Equal[int](t, qbaz, 1) 685 }) 686 } 687 } 688 689 func TestMsgTraceWithRoutes(t *testing.T) { 690 tmpl := ` 691 port: -1 692 accounts { 693 A { users: [{user:A, password: pwd}] } 694 B { users: [{user:B, password: pwd}] } 695 } 696 cluster { 697 name: "local" 698 port: -1 699 accounts: ["A"] 700 %s 701 } 702 ` 703 conf1 := createConfFile(t, []byte(fmt.Sprintf(tmpl, _EMPTY_))) 704 s1, o1 := RunServerWithConfig(conf1) 705 defer s1.Shutdown() 706 707 conf2 := createConfFile(t, []byte(fmt.Sprintf(tmpl, fmt.Sprintf("routes: [\"nats://127.0.0.1:%d\"]", o1.Cluster.Port)))) 708 s2, _ := RunServerWithConfig(conf2) 709 defer s2.Shutdown() 710 711 checkClusterFormed(t, s1, s2) 712 713 checkDummy := func(user string) { 714 nc := natsConnect(t, s1.ClientURL(), nats.UserInfo(user, "pwd")) 715 defer nc.Close() 716 717 traceSub := natsSubSync(t, nc, "my.trace.subj") 718 natsFlush(t, nc) 719 720 // Send trace message to a dummy subject to check that resulting trace 721 // is as expected. 722 msg := nats.NewMsg("dummy") 723 msg.Header.Set(MsgTraceDest, traceSub.Subject) 724 msg.Header.Set(MsgTraceOnly, "true") 725 msg.Data = []byte("hello!") 726 err := nc.PublishMsg(msg) 727 require_NoError(t, err) 728 729 traceMsg := natsNexMsg(t, traceSub, time.Second) 730 var e MsgTraceEvent 731 json.Unmarshal(traceMsg.Data, &e) 732 require_Equal[string](t, e.Server.Name, s1.Name()) 733 ingress := e.Ingress() 734 require_True(t, ingress != nil) 735 require_True(t, ingress.Kind == CLIENT) 736 // "user" is same than account name in this test. 737 require_Equal[string](t, ingress.Account, user) 738 require_Equal[string](t, ingress.Subject, "dummy") 739 require_True(t, e.SubjectMapping() == nil) 740 require_True(t, e.Egresses() == nil) 741 742 // We should also not get an event from the remote server. 
743 if msg, err := traceSub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 744 t.Fatalf("Expected no message, got %s", msg.Data) 745 } 746 } 747 checkDummy("A") 748 checkDummy("B") 749 750 for _, test := range []struct { 751 name string 752 acc string 753 }{ 754 {"pinned account", "A"}, 755 {"reg account", "B"}, 756 } { 757 t.Run(test.name, func(t *testing.T) { 758 acc := test.acc 759 // Now create subscriptions on both s1 and s2 760 nc2 := natsConnect(t, s2.ClientURL(), nats.UserInfo(acc, "pwd"), nats.Name("sub2")) 761 defer nc2.Close() 762 sub2 := natsQueueSubSync(t, nc2, "foo.*", "my_queue") 763 764 nc3 := natsConnect(t, s2.ClientURL(), nats.UserInfo(acc, "pwd"), nats.Name("sub3")) 765 defer nc3.Close() 766 sub3 := natsQueueSubSync(t, nc3, "*.*", "my_queue_2") 767 768 checkSubInterest(t, s1, acc, "foo.bar", time.Second) 769 770 nc1 := natsConnect(t, s1.ClientURL(), nats.UserInfo(acc, "pwd"), nats.Name("sub1")) 771 defer nc1.Close() 772 sub1 := natsSubSync(t, nc1, "*.bar") 773 774 nct := natsConnect(t, s1.ClientURL(), nats.UserInfo(acc, "pwd"), nats.Name("tracer")) 775 defer nct.Close() 776 traceSub := natsSubSync(t, nct, "my.trace.subj") 777 778 for _, test := range []struct { 779 name string 780 deliverMsg bool 781 }{ 782 {"just trace", false}, 783 {"deliver msg", true}, 784 } { 785 t.Run(test.name, func(t *testing.T) { 786 msg := nats.NewMsg("foo.bar") 787 msg.Header.Set(MsgTraceDest, traceSub.Subject) 788 if !test.deliverMsg { 789 msg.Header.Set(MsgTraceOnly, "true") 790 } 791 msg.Data = []byte("hello!") 792 err := nct.PublishMsg(msg) 793 require_NoError(t, err) 794 795 checkAppMsg := func(sub *nats.Subscription, expected bool) { 796 if expected { 797 appMsg := natsNexMsg(t, sub, time.Second) 798 require_Equal[string](t, string(appMsg.Data), "hello!") 799 } 800 // Check that no (more) messages are received. 
801 if msg, err := sub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 802 t.Fatalf("Did not expect application message, got %s", msg.Data) 803 } 804 } 805 for _, sub := range []*nats.Subscription{sub1, sub2, sub3} { 806 checkAppMsg(sub, test.deliverMsg) 807 } 808 809 check := func() { 810 traceMsg := natsNexMsg(t, traceSub, time.Second) 811 var e MsgTraceEvent 812 json.Unmarshal(traceMsg.Data, &e) 813 ingress := e.Ingress() 814 require_True(t, ingress != nil) 815 switch ingress.Kind { 816 case CLIENT: 817 require_Equal[string](t, e.Server.Name, s1.Name()) 818 require_Equal[string](t, ingress.Account, acc) 819 require_Equal[string](t, ingress.Subject, "foo.bar") 820 egress := e.Egresses() 821 require_Equal[int](t, len(egress), 2) 822 for _, eg := range egress { 823 if eg.Kind == CLIENT { 824 require_Equal[string](t, eg.Name, "sub1") 825 require_Equal[string](t, eg.Subscription, "*.bar") 826 require_Equal[string](t, eg.Queue, _EMPTY_) 827 } else { 828 require_True(t, eg.Kind == ROUTER) 829 require_Equal[string](t, eg.Name, s2.Name()) 830 require_Equal[string](t, eg.Subscription, _EMPTY_) 831 require_Equal[string](t, eg.Queue, _EMPTY_) 832 } 833 } 834 case ROUTER: 835 require_Equal[string](t, e.Server.Name, s2.Name()) 836 require_Equal[string](t, ingress.Account, acc) 837 require_Equal[string](t, ingress.Subject, "foo.bar") 838 egress := e.Egresses() 839 require_Equal[int](t, len(egress), 2) 840 var gotSub2, gotSub3 int 841 for _, eg := range egress { 842 require_True(t, eg.Kind == CLIENT) 843 switch eg.Name { 844 case "sub2": 845 require_Equal[string](t, eg.Subscription, "foo.*") 846 require_Equal[string](t, eg.Queue, "my_queue") 847 gotSub2++ 848 case "sub3": 849 require_Equal[string](t, eg.Subscription, "*.*") 850 require_Equal[string](t, eg.Queue, "my_queue_2") 851 gotSub3++ 852 default: 853 t.Fatalf("Unexpected egress name: %+v", eg) 854 } 855 } 856 require_Equal[int](t, gotSub2, 1) 857 require_Equal[int](t, gotSub3, 1) 858 default: 859 t.Fatalf("Unexpected ingress: %+v", ingress) 860 } 861 } 862 // We should get 2 events. Order is not guaranteed. 
863 check() 864 check() 865 // Make sure we are not receiving more traces 866 if tm, err := traceSub.NextMsg(250 * time.Millisecond); err == nil { 867 t.Fatalf("Should not have received trace message: %s", tm.Data) 868 } 869 }) 870 } 871 }) 872 } 873 } 874 875 func TestMsgTraceWithRouteToOldServer(t *testing.T) { 876 msgTraceCheckSupport = true 877 defer func() { msgTraceCheckSupport = false }() 878 tmpl := ` 879 port: -1 880 cluster { 881 name: "local" 882 port: -1 883 pool_size: -1 884 %s 885 } 886 ` 887 conf1 := createConfFile(t, []byte(fmt.Sprintf(tmpl, _EMPTY_))) 888 s1, o1 := RunServerWithConfig(conf1) 889 defer s1.Shutdown() 890 891 conf2 := createConfFile(t, []byte(fmt.Sprintf(tmpl, fmt.Sprintf("routes: [\"nats://127.0.0.1:%d\"]", o1.Cluster.Port)))) 892 o2 := LoadConfig(conf2) 893 // Make this server behave like an older server 894 o2.overrideProto = setServerProtoForTest(MsgTraceProto - 1) 895 s2 := RunServer(o2) 896 defer s2.Shutdown() 897 898 checkClusterFormed(t, s1, s2) 899 900 // Now create subscriptions on both s1 and s2 901 nc2 := natsConnect(t, s2.ClientURL(), nats.Name("sub2")) 902 defer nc2.Close() 903 sub2 := natsSubSync(t, nc2, "foo") 904 905 checkSubInterest(t, s1, globalAccountName, "foo", time.Second) 906 907 nc1 := natsConnect(t, s1.ClientURL(), nats.Name("sub1")) 908 defer nc1.Close() 909 sub1 := natsSubSync(t, nc1, "foo") 910 911 nct := natsConnect(t, s1.ClientURL(), nats.Name("tracer")) 912 defer nct.Close() 913 traceSub := natsSubSync(t, nct, "my.trace.subj") 914 915 for _, test := range []struct { 916 name string 917 deliverMsg bool 918 }{ 919 {"just trace", false}, 920 {"deliver msg", true}, 921 } { 922 t.Run(test.name, func(t *testing.T) { 923 msg := nats.NewMsg("foo") 924 msg.Header.Set(MsgTraceDest, traceSub.Subject) 925 if !test.deliverMsg { 926 msg.Header.Set(MsgTraceOnly, "true") 927 } 928 msg.Data = []byte("hello!") 929 err := nct.PublishMsg(msg) 930 require_NoError(t, err) 931 932 checkAppMsg := func(sub *nats.Subscription, expected bool) { 933 if expected { 934 appMsg := natsNexMsg(t, sub, time.Second) 935 require_Equal[string](t, string(appMsg.Data), "hello!") 936 } 937 // Check that no (more) messages are received. 938 if msg, err := sub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 939 t.Fatalf("Did not expect application message, got %s", msg.Data) 940 } 941 } 942 // Even if a server does not support tracing, as long as the header 943 // TraceOnly is not set, the message should be forwarded to the remote. 
944 for _, sub := range []*nats.Subscription{sub1, sub2} { 945 checkAppMsg(sub, test.deliverMsg) 946 } 947 948 traceMsg := natsNexMsg(t, traceSub, time.Second) 949 var e MsgTraceEvent 950 json.Unmarshal(traceMsg.Data, &e) 951 ingress := e.Ingress() 952 require_True(t, ingress != nil) 953 require_True(t, ingress.Kind == CLIENT) 954 require_Equal[string](t, e.Server.Name, s1.Name()) 955 egress := e.Egresses() 956 require_Equal[int](t, len(egress), 2) 957 for _, ci := range egress { 958 switch ci.Kind { 959 case CLIENT: 960 require_Equal[string](t, ci.Name, "sub1") 961 case ROUTER: 962 require_Equal[string](t, ci.Name, s2.Name()) 963 if test.deliverMsg { 964 require_Contains(t, ci.Error, errMsgTraceNoSupport) 965 } else { 966 require_Contains(t, ci.Error, errMsgTraceOnlyNoSupport) 967 } 968 default: 969 t.Fatalf("Unexpected egress: %+v", ci) 970 } 971 } 972 // We should not get a second trace 973 if msg, err := traceSub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 974 t.Fatalf("Did not expect other trace, got %s", msg.Data) 975 } 976 }) 977 } 978 } 979 980 func TestMsgTraceWithLeafNode(t *testing.T) { 981 for _, mainTest := range []struct { 982 name string 983 fromHub bool 984 leafUseLocalAcc bool 985 }{ 986 {"from hub", true, false}, 987 {"from leaf", false, false}, 988 {"from hub with local account", true, true}, 989 {"from leaf with local account", false, true}, 990 } { 991 t.Run(mainTest.name, func(t *testing.T) { 992 confHub := createConfFile(t, []byte(` 993 port: -1 994 server_name: "A" 995 accounts { 996 A { users: [{user: "a", password: "pwd"}]} 997 B { users: [{user: "b", password: "pwd"}]} 998 } 999 leafnodes { 1000 port: -1 1001 } 1002 `)) 1003 hub, ohub := RunServerWithConfig(confHub) 1004 defer hub.Shutdown() 1005 1006 var accs string 1007 var lacc string 1008 if mainTest.leafUseLocalAcc { 1009 accs = `accounts { B { users: [{user: "b", password: "pwd"}]} }` 1010 lacc = `account: B` 1011 } 1012 confLeaf := createConfFile(t, []byte(fmt.Sprintf(` 1013 port: -1 1014 server_name: "B" 1015 %s 1016 leafnodes { 1017 remotes [ 1018 { 1019 url: "nats://a:pwd@127.0.0.1:%d" 1020 %s 1021 } 1022 ] 1023 } 1024 `, accs, ohub.LeafNode.Port, lacc))) 1025 leaf, _ := RunServerWithConfig(confLeaf) 1026 defer leaf.Shutdown() 1027 1028 checkLeafNodeConnected(t, hub) 1029 checkLeafNodeConnected(t, leaf) 1030 1031 var s1, s2 *Server 1032 if mainTest.fromHub { 1033 s1, s2 = hub, leaf 1034 } else { 1035 s1, s2 = leaf, hub 1036 } 1037 // Now create subscriptions on both s1 and s2 1038 opts := []nats.Option{nats.Name("sub2")} 1039 var user string 1040 // If fromHub, then it means that s2 is the leaf. 1041 if mainTest.fromHub { 1042 if mainTest.leafUseLocalAcc { 1043 user = "b" 1044 } 1045 } else { 1046 // s2 is the hub, always connect with user "a'" 1047 user = "a" 1048 } 1049 if user != _EMPTY_ { 1050 opts = append(opts, nats.UserInfo(user, "pwd")) 1051 } 1052 nc2 := natsConnect(t, s2.ClientURL(), opts...) 
1053 defer nc2.Close() 1054 sub2 := natsSubSync(t, nc2, "foo") 1055 1056 if mainTest.fromHub { 1057 checkSubInterest(t, s1, "A", "foo", time.Second) 1058 } else if mainTest.leafUseLocalAcc { 1059 checkSubInterest(t, s1, "B", "foo", time.Second) 1060 } else { 1061 checkSubInterest(t, s1, globalAccountName, "foo", time.Second) 1062 } 1063 1064 user = _EMPTY_ 1065 opts = []nats.Option{nats.Name("sub1")} 1066 if mainTest.fromHub { 1067 // s1 is the hub, so we need user "a" 1068 user = "a" 1069 } else if mainTest.leafUseLocalAcc { 1070 // s1 is the leaf, we need user "b" if leafUseLocalAcc 1071 user = "b" 1072 } 1073 if user != _EMPTY_ { 1074 opts = append(opts, nats.UserInfo(user, "pwd")) 1075 } 1076 nc1 := natsConnect(t, s1.ClientURL(), opts...) 1077 defer nc1.Close() 1078 sub1 := natsSubSync(t, nc1, "foo") 1079 1080 opts = []nats.Option{nats.Name("tracer")} 1081 if user != _EMPTY_ { 1082 opts = append(opts, nats.UserInfo(user, "pwd")) 1083 } 1084 nct := natsConnect(t, s1.ClientURL(), opts...) 1085 defer nct.Close() 1086 traceSub := natsSubSync(t, nct, "my.trace.subj") 1087 1088 for _, test := range []struct { 1089 name string 1090 deliverMsg bool 1091 }{ 1092 {"just trace", false}, 1093 {"deliver msg", true}, 1094 } { 1095 t.Run(test.name, func(t *testing.T) { 1096 msg := nats.NewMsg("foo") 1097 msg.Header.Set(MsgTraceDest, traceSub.Subject) 1098 if !test.deliverMsg { 1099 msg.Header.Set(MsgTraceOnly, "true") 1100 } 1101 msg.Data = []byte("hello!") 1102 err := nct.PublishMsg(msg) 1103 require_NoError(t, err) 1104 1105 checkAppMsg := func(sub *nats.Subscription, expected bool) { 1106 if expected { 1107 appMsg := natsNexMsg(t, sub, time.Second) 1108 require_Equal[string](t, string(appMsg.Data), "hello!") 1109 } 1110 // Check that no (more) messages are received. 
1111 if msg, err := sub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 1112 t.Fatalf("Did not expect application message, got %s", msg.Data) 1113 } 1114 } 1115 for _, sub := range []*nats.Subscription{sub1, sub2} { 1116 checkAppMsg(sub, test.deliverMsg) 1117 } 1118 1119 check := func() { 1120 traceMsg := natsNexMsg(t, traceSub, time.Second) 1121 var e MsgTraceEvent 1122 json.Unmarshal(traceMsg.Data, &e) 1123 ingress := e.Ingress() 1124 require_True(t, ingress != nil) 1125 switch ingress.Kind { 1126 case CLIENT: 1127 require_Equal[string](t, e.Server.Name, s1.Name()) 1128 egress := e.Egresses() 1129 require_Equal[int](t, len(egress), 2) 1130 for _, eg := range egress { 1131 switch eg.Kind { 1132 case CLIENT: 1133 require_Equal[string](t, eg.Name, "sub1") 1134 case LEAF: 1135 require_Equal[string](t, eg.Name, s2.Name()) 1136 require_Equal[string](t, eg.Error, _EMPTY_) 1137 default: 1138 t.Fatalf("Unexpected egress: %+v", eg) 1139 } 1140 } 1141 case LEAF: 1142 require_Equal[string](t, e.Server.Name, s2.Name()) 1143 require_True(t, ingress.Kind == LEAF) 1144 require_Equal(t, ingress.Name, s1.Name()) 1145 egress := e.Egresses() 1146 require_Equal[int](t, len(egress), 1) 1147 eg := egress[0] 1148 require_True(t, eg.Kind == CLIENT) 1149 require_Equal[string](t, eg.Name, "sub2") 1150 default: 1151 t.Fatalf("Unexpected ingress: %+v", ingress) 1152 } 1153 } 1154 check() 1155 check() 1156 // Make sure we are not receiving more traces 1157 if tm, err := traceSub.NextMsg(250 * time.Millisecond); err == nil { 1158 t.Fatalf("Should not have received trace message: %s", tm.Data) 1159 } 1160 }) 1161 } 1162 }) 1163 } 1164 } 1165 1166 func TestMsgTraceWithLeafNodeToOldServer(t *testing.T) { 1167 msgTraceCheckSupport = true 1168 defer func() { msgTraceCheckSupport = false }() 1169 for _, mainTest := range []struct { 1170 name string 1171 fromHub bool 1172 }{ 1173 {"from hub", true}, 1174 {"from leaf", false}, 1175 } { 1176 t.Run(mainTest.name, func(t *testing.T) { 1177 confHub := createConfFile(t, []byte(` 1178 port: -1 1179 server_name: "A" 1180 leafnodes { 1181 port: -1 1182 } 1183 `)) 1184 ohub := LoadConfig(confHub) 1185 if !mainTest.fromHub { 1186 // Make this server behave like an older server 1187 ohub.overrideProto = setServerProtoForTest(MsgTraceProto - 1) 1188 } 1189 hub := RunServer(ohub) 1190 defer hub.Shutdown() 1191 1192 confLeaf := createConfFile(t, []byte(fmt.Sprintf(` 1193 port: -1 1194 server_name: "B" 1195 leafnodes { 1196 remotes [{url: "nats://127.0.0.1:%d"}] 1197 } 1198 `, ohub.LeafNode.Port))) 1199 oleaf := LoadConfig(confLeaf) 1200 if mainTest.fromHub { 1201 // Make this server behave like an older server 1202 oleaf.overrideProto = setServerProtoForTest(MsgTraceProto - 1) 1203 } 1204 leaf := RunServer(oleaf) 1205 defer leaf.Shutdown() 1206 1207 checkLeafNodeConnected(t, hub) 1208 checkLeafNodeConnected(t, leaf) 1209 1210 var s1, s2 *Server 1211 if mainTest.fromHub { 1212 s1, s2 = hub, leaf 1213 } else { 1214 s1, s2 = leaf, hub 1215 } 1216 1217 // Now create subscriptions on both s1 and s2 1218 nc2 := natsConnect(t, s2.ClientURL(), nats.Name("sub2")) 1219 defer nc2.Close() 1220 sub2 := natsSubSync(t, nc2, "foo") 1221 1222 checkSubInterest(t, s1, globalAccountName, "foo", time.Second) 1223 1224 nc1 := natsConnect(t, s1.ClientURL(), nats.Name("sub1")) 1225 defer nc1.Close() 1226 sub1 := natsSubSync(t, nc1, "foo") 1227 1228 nct := natsConnect(t, s1.ClientURL(), nats.Name("tracer")) 1229 defer nct.Close() 1230 traceSub := natsSubSync(t, nct, "my.trace.subj") 1231 1232 for _, test 
:= range []struct { 1233 name string 1234 deliverMsg bool 1235 }{ 1236 {"just trace", false}, 1237 {"deliver msg", true}, 1238 } { 1239 t.Run(test.name, func(t *testing.T) { 1240 msg := nats.NewMsg("foo") 1241 msg.Header.Set(MsgTraceDest, traceSub.Subject) 1242 if !test.deliverMsg { 1243 msg.Header.Set(MsgTraceOnly, "true") 1244 } 1245 msg.Data = []byte("hello!") 1246 err := nct.PublishMsg(msg) 1247 require_NoError(t, err) 1248 1249 checkAppMsg := func(sub *nats.Subscription, expected bool) { 1250 if expected { 1251 appMsg := natsNexMsg(t, sub, time.Second) 1252 require_Equal[string](t, string(appMsg.Data), "hello!") 1253 } 1254 // Check that no (more) messages are received. 1255 if msg, err := sub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 1256 t.Fatalf("Did not expect application message, got %s", msg.Data) 1257 } 1258 } 1259 // Even if a server does not support tracing, as long as the header 1260 // TraceOnly is not set, the message should be forwarded to the remote. 1261 for _, sub := range []*nats.Subscription{sub1, sub2} { 1262 checkAppMsg(sub, test.deliverMsg) 1263 } 1264 1265 traceMsg := natsNexMsg(t, traceSub, time.Second) 1266 var e MsgTraceEvent 1267 json.Unmarshal(traceMsg.Data, &e) 1268 ingress := e.Ingress() 1269 require_True(t, ingress != nil) 1270 require_True(t, ingress.Kind == CLIENT) 1271 require_Equal[string](t, e.Server.Name, s1.Name()) 1272 egress := e.Egresses() 1273 require_Equal[int](t, len(egress), 2) 1274 for _, ci := range egress { 1275 switch ci.Kind { 1276 case CLIENT: 1277 require_Equal[string](t, ci.Name, "sub1") 1278 case LEAF: 1279 require_Equal[string](t, ci.Name, s2.Name()) 1280 if test.deliverMsg { 1281 require_Contains(t, ci.Error, errMsgTraceNoSupport) 1282 } else { 1283 require_Contains(t, ci.Error, errMsgTraceOnlyNoSupport) 1284 } 1285 default: 1286 t.Fatalf("Unexpected egress: %+v", ci) 1287 } 1288 } 1289 // We should not get a second trace 1290 if msg, err := traceSub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 1291 t.Fatalf("Did not expect other trace, got %s", msg.Data) 1292 } 1293 }) 1294 } 1295 }) 1296 } 1297 } 1298 1299 func TestMsgTraceWithLeafNodeDaisyChain(t *testing.T) { 1300 confHub := createConfFile(t, []byte(` 1301 port: -1 1302 server_name: "A" 1303 accounts { 1304 A { users: [{user: "a", password: "pwd"}]} 1305 } 1306 leafnodes { 1307 port: -1 1308 } 1309 `)) 1310 hub, ohub := RunServerWithConfig(confHub) 1311 defer hub.Shutdown() 1312 1313 confLeaf1 := createConfFile(t, []byte(fmt.Sprintf(` 1314 port: -1 1315 server_name: "B" 1316 accounts { 1317 B { users: [{user: "b", password: "pwd"}]} 1318 } 1319 leafnodes { 1320 port: -1 1321 remotes [{url: "nats://a:pwd@127.0.0.1:%d", account: B}] 1322 } 1323 `, ohub.LeafNode.Port))) 1324 leaf1, oleaf1 := RunServerWithConfig(confLeaf1) 1325 defer leaf1.Shutdown() 1326 1327 confLeaf2 := createConfFile(t, []byte(fmt.Sprintf(` 1328 port: -1 1329 server_name: "C" 1330 accounts { 1331 C { users: [{user: "c", password: "pwd"}]} 1332 } 1333 leafnodes { 1334 remotes [{url: "nats://b:pwd@127.0.0.1:%d", account: C}] 1335 } 1336 `, oleaf1.LeafNode.Port))) 1337 leaf2, _ := RunServerWithConfig(confLeaf2) 1338 defer leaf2.Shutdown() 1339 1340 checkLeafNodeConnected(t, hub) 1341 checkLeafNodeConnectedCount(t, leaf1, 2) 1342 checkLeafNodeConnected(t, leaf2) 1343 1344 nct := natsConnect(t, hub.ClientURL(), nats.UserInfo("a", "pwd"), nats.Name("Tracer")) 1345 defer nct.Close() 1346 traceSub := natsSubSync(t, nct, "my.trace.subj") 1347 natsFlush(t, nct) 1348 // Make sure that subject 
interest travels down to leaf2 1349 checkSubInterest(t, leaf2, "C", traceSub.Subject, time.Second) 1350 1351 nc1 := natsConnect(t, leaf1.ClientURL(), nats.UserInfo("b", "pwd"), nats.Name("sub1")) 1352 defer nc1.Close() 1353 1354 nc2 := natsConnect(t, leaf2.ClientURL(), nats.UserInfo("c", "pwd"), nats.Name("sub2")) 1355 defer nc2.Close() 1356 sub2 := natsQueueSubSync(t, nc2, "foo.bar", "my_queue") 1357 natsFlush(t, nc2) 1358 1359 // Check the subject interest makes it to leaf1 1360 checkSubInterest(t, leaf1, "B", "foo.bar", time.Second) 1361 1362 // Now create the sub on leaf1 1363 sub1 := natsSubSync(t, nc1, "foo.*") 1364 natsFlush(t, nc1) 1365 1366 // Check that subject interest registered on "hub" 1367 checkSubInterest(t, hub, "A", "foo.bar", time.Second) 1368 1369 for _, test := range []struct { 1370 name string 1371 deliverMsg bool 1372 }{ 1373 {"just trace", false}, 1374 {"deliver msg", true}, 1375 } { 1376 t.Run(test.name, func(t *testing.T) { 1377 msg := nats.NewMsg("foo.bar") 1378 msg.Header.Set(MsgTraceDest, traceSub.Subject) 1379 if !test.deliverMsg { 1380 msg.Header.Set(MsgTraceOnly, "true") 1381 } 1382 msg.Data = []byte("hello!") 1383 err := nct.PublishMsg(msg) 1384 require_NoError(t, err) 1385 1386 checkAppMsg := func(sub *nats.Subscription, expected bool) { 1387 if expected { 1388 appMsg := natsNexMsg(t, sub, time.Second) 1389 require_Equal[string](t, string(appMsg.Data), "hello!") 1390 } 1391 // Check that no (more) messages are received. 1392 if msg, err := sub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 1393 t.Fatalf("Did not expect application message, got %s", msg.Data) 1394 } 1395 } 1396 for _, sub := range []*nats.Subscription{sub1, sub2} { 1397 checkAppMsg(sub, test.deliverMsg) 1398 } 1399 1400 check := func() { 1401 traceMsg := natsNexMsg(t, traceSub, time.Second) 1402 var e MsgTraceEvent 1403 json.Unmarshal(traceMsg.Data, &e) 1404 1405 ingress := e.Ingress() 1406 require_True(t, ingress != nil) 1407 switch ingress.Kind { 1408 case CLIENT: 1409 require_Equal[string](t, e.Server.Name, hub.Name()) 1410 require_Equal[string](t, ingress.Name, "Tracer") 1411 require_Equal[string](t, ingress.Account, "A") 1412 require_Equal[string](t, ingress.Subject, "foo.bar") 1413 egress := e.Egresses() 1414 require_Equal[int](t, len(egress), 1) 1415 eg := egress[0] 1416 require_True(t, eg.Kind == LEAF) 1417 require_Equal[string](t, eg.Name, leaf1.Name()) 1418 require_Equal[string](t, eg.Account, _EMPTY_) 1419 require_Equal[string](t, eg.Subscription, _EMPTY_) 1420 case LEAF: 1421 switch e.Server.Name { 1422 case leaf1.Name(): 1423 require_Equal(t, ingress.Name, hub.Name()) 1424 require_Equal(t, ingress.Account, "B") 1425 require_Equal[string](t, ingress.Subject, "foo.bar") 1426 egress := e.Egresses() 1427 require_Equal[int](t, len(egress), 2) 1428 for _, eg := range egress { 1429 switch eg.Kind { 1430 case CLIENT: 1431 require_Equal[string](t, eg.Name, "sub1") 1432 require_Equal[string](t, eg.Subscription, "foo.*") 1433 require_Equal[string](t, eg.Queue, _EMPTY_) 1434 case LEAF: 1435 require_Equal[string](t, eg.Name, leaf2.Name()) 1436 require_Equal[string](t, eg.Account, _EMPTY_) 1437 require_Equal[string](t, eg.Subscription, _EMPTY_) 1438 require_Equal[string](t, eg.Queue, _EMPTY_) 1439 default: 1440 t.Fatalf("Unexpected egress: %+v", eg) 1441 } 1442 } 1443 case leaf2.Name(): 1444 require_Equal(t, ingress.Name, leaf1.Name()) 1445 require_Equal(t, ingress.Account, "C") 1446 require_Equal[string](t, ingress.Subject, "foo.bar") 1447 egress := e.Egresses() 1448 
require_Equal[int](t, len(egress), 1) 1449 eg := egress[0] 1450 require_True(t, eg.Kind == CLIENT) 1451 require_Equal[string](t, eg.Name, "sub2") 1452 require_Equal[string](t, eg.Subscription, "foo.bar") 1453 require_Equal[string](t, eg.Queue, "my_queue") 1454 default: 1455 t.Fatalf("Unexpected ingress: %+v", ingress) 1456 } 1457 default: 1458 t.Fatalf("Unexpected ingress: %+v", ingress) 1459 } 1460 } 1461 check() 1462 check() 1463 check() 1464 // Make sure we are not receiving more traces 1465 if tm, err := traceSub.NextMsg(250 * time.Millisecond); err == nil { 1466 t.Fatalf("Should not have received trace message: %s", tm.Data) 1467 } 1468 }) 1469 } 1470 } 1471 1472 func TestMsgTraceWithGateways(t *testing.T) { 1473 o2 := testDefaultOptionsForGateway("B") 1474 o2.NoSystemAccount = false 1475 s2 := runGatewayServer(o2) 1476 defer s2.Shutdown() 1477 1478 o1 := testGatewayOptionsFromToWithServers(t, "A", "B", s2) 1479 o1.NoSystemAccount = false 1480 s1 := runGatewayServer(o1) 1481 defer s1.Shutdown() 1482 1483 waitForOutboundGateways(t, s1, 1, time.Second) 1484 waitForInboundGateways(t, s2, 1, time.Second) 1485 waitForOutboundGateways(t, s2, 1, time.Second) 1486 1487 nc2 := natsConnect(t, s2.ClientURL(), nats.Name("sub2")) 1488 defer nc2.Close() 1489 sub2 := natsQueueSubSync(t, nc2, "foo.*", "my_queue") 1490 1491 nc3 := natsConnect(t, s2.ClientURL(), nats.Name("sub3")) 1492 defer nc3.Close() 1493 sub3 := natsQueueSubSync(t, nc3, "*.*", "my_queue_2") 1494 1495 nc1 := natsConnect(t, s1.ClientURL(), nats.Name("sub1")) 1496 defer nc1.Close() 1497 sub1 := natsSubSync(t, nc1, "*.bar") 1498 1499 nct := natsConnect(t, s1.ClientURL(), nats.Name("tracer")) 1500 defer nct.Close() 1501 traceSub := natsSubSync(t, nct, "my.trace.subj") 1502 1503 for _, test := range []struct { 1504 name string 1505 deliverMsg bool 1506 }{ 1507 {"just trace", false}, 1508 {"deliver msg", true}, 1509 } { 1510 t.Run(test.name, func(t *testing.T) { 1511 msg := nats.NewMsg("foo.bar") 1512 msg.Header.Set(MsgTraceDest, traceSub.Subject) 1513 if !test.deliverMsg { 1514 msg.Header.Set(MsgTraceOnly, "true") 1515 } 1516 msg.Data = []byte("hello!") 1517 err := nct.PublishMsg(msg) 1518 require_NoError(t, err) 1519 1520 checkAppMsg := func(sub *nats.Subscription, expected bool) { 1521 if expected { 1522 appMsg := natsNexMsg(t, sub, time.Second) 1523 require_Equal[string](t, string(appMsg.Data), "hello!") 1524 } 1525 // Check that no (more) messages are received. 
1526 if msg, err := sub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 1527 t.Fatalf("Did not expect application message, got %s", msg.Data) 1528 } 1529 } 1530 for _, sub := range []*nats.Subscription{sub1, sub2, sub3} { 1531 checkAppMsg(sub, test.deliverMsg) 1532 } 1533 1534 check := func() { 1535 traceMsg := natsNexMsg(t, traceSub, time.Second) 1536 var e MsgTraceEvent 1537 json.Unmarshal(traceMsg.Data, &e) 1538 1539 ingress := e.Ingress() 1540 require_True(t, ingress != nil) 1541 switch ingress.Kind { 1542 case CLIENT: 1543 require_Equal[string](t, e.Server.Name, s1.Name()) 1544 require_Equal[string](t, ingress.Account, globalAccountName) 1545 require_Equal[string](t, ingress.Subject, "foo.bar") 1546 egress := e.Egresses() 1547 require_Equal[int](t, len(egress), 2) 1548 for _, eg := range egress { 1549 switch eg.Kind { 1550 case CLIENT: 1551 require_Equal[string](t, eg.Name, "sub1") 1552 require_Equal[string](t, eg.Subscription, "*.bar") 1553 require_Equal[string](t, eg.Queue, _EMPTY_) 1554 case GATEWAY: 1555 require_Equal[string](t, eg.Name, s2.Name()) 1556 require_Equal[string](t, eg.Error, _EMPTY_) 1557 require_Equal[string](t, eg.Subscription, _EMPTY_) 1558 require_Equal[string](t, eg.Queue, _EMPTY_) 1559 default: 1560 t.Fatalf("Unexpected egress: %+v", eg) 1561 } 1562 } 1563 case GATEWAY: 1564 require_Equal[string](t, e.Server.Name, s2.Name()) 1565 require_Equal[string](t, ingress.Account, globalAccountName) 1566 require_Equal[string](t, ingress.Subject, "foo.bar") 1567 egress := e.Egresses() 1568 require_Equal[int](t, len(egress), 2) 1569 var gotSub2, gotSub3 int 1570 for _, eg := range egress { 1571 require_True(t, eg.Kind == CLIENT) 1572 switch eg.Name { 1573 case "sub2": 1574 require_Equal[string](t, eg.Subscription, "foo.*") 1575 require_Equal[string](t, eg.Queue, "my_queue") 1576 gotSub2++ 1577 case "sub3": 1578 require_Equal[string](t, eg.Subscription, "*.*") 1579 require_Equal[string](t, eg.Queue, "my_queue_2") 1580 gotSub3++ 1581 default: 1582 t.Fatalf("Unexpected egress name: %+v", eg) 1583 } 1584 } 1585 require_Equal[int](t, gotSub2, 1) 1586 require_Equal[int](t, gotSub3, 1) 1587 1588 default: 1589 t.Fatalf("Unexpected ingress: %+v", ingress) 1590 } 1591 } 1592 // We should get 2 events 1593 check() 1594 check() 1595 // Make sure we are not receiving more traces 1596 if tm, err := traceSub.NextMsg(250 * time.Millisecond); err == nil { 1597 t.Fatalf("Should not have received trace message: %s", tm.Data) 1598 } 1599 }) 1600 } 1601 } 1602 1603 func TestMsgTraceWithGatewayToOldServer(t *testing.T) { 1604 msgTraceCheckSupport = true 1605 defer func() { msgTraceCheckSupport = false }() 1606 1607 o2 := testDefaultOptionsForGateway("B") 1608 o2.NoSystemAccount = false 1609 // Make this server behave like an older server 1610 o2.overrideProto = setServerProtoForTest(MsgTraceProto - 1) 1611 s2 := runGatewayServer(o2) 1612 defer s2.Shutdown() 1613 1614 o1 := testGatewayOptionsFromToWithServers(t, "A", "B", s2) 1615 o1.NoSystemAccount = false 1616 s1 := runGatewayServer(o1) 1617 defer s1.Shutdown() 1618 1619 waitForOutboundGateways(t, s1, 1, time.Second) 1620 waitForInboundGateways(t, s2, 1, time.Second) 1621 waitForOutboundGateways(t, s2, 1, time.Second) 1622 1623 nc2 := natsConnect(t, s2.ClientURL(), nats.Name("sub2")) 1624 defer nc2.Close() 1625 sub2 := natsSubSync(t, nc2, "foo") 1626 1627 nc1 := natsConnect(t, s1.ClientURL(), nats.Name("sub1")) 1628 defer nc1.Close() 1629 sub1 := natsSubSync(t, nc1, "foo") 1630 1631 nct := natsConnect(t, s1.ClientURL(), 
nats.Name("tracer")) 1632 defer nct.Close() 1633 traceSub := natsSubSync(t, nct, "my.trace.subj") 1634 1635 for _, test := range []struct { 1636 name string 1637 deliverMsg bool 1638 }{ 1639 {"just trace", false}, 1640 {"deliver msg", true}, 1641 } { 1642 t.Run(test.name, func(t *testing.T) { 1643 msg := nats.NewMsg("foo") 1644 msg.Header.Set(MsgTraceDest, traceSub.Subject) 1645 if !test.deliverMsg { 1646 msg.Header.Set(MsgTraceOnly, "true") 1647 } 1648 msg.Data = []byte("hello!") 1649 err := nct.PublishMsg(msg) 1650 require_NoError(t, err) 1651 1652 checkAppMsg := func(sub *nats.Subscription, expected bool) { 1653 if expected { 1654 appMsg := natsNexMsg(t, sub, time.Second) 1655 require_Equal[string](t, string(appMsg.Data), "hello!") 1656 } 1657 // Check that no (more) messages are received. 1658 if msg, err := sub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 1659 t.Fatalf("Did not expect application message, got %s", msg.Data) 1660 } 1661 } 1662 // Even if a server does not support tracing, as long as the header 1663 // TraceOnly is not set, the message should be forwarded to the remote. 1664 for _, sub := range []*nats.Subscription{sub1, sub2} { 1665 checkAppMsg(sub, test.deliverMsg) 1666 } 1667 1668 traceMsg := natsNexMsg(t, traceSub, time.Second) 1669 var e MsgTraceEvent 1670 json.Unmarshal(traceMsg.Data, &e) 1671 ingress := e.Ingress() 1672 require_True(t, ingress != nil) 1673 switch ingress.Kind { 1674 case CLIENT: 1675 require_Equal[string](t, e.Server.Name, s1.Name()) 1676 egress := e.Egresses() 1677 require_Equal[int](t, len(egress), 2) 1678 for _, ci := range egress { 1679 switch ci.Kind { 1680 case CLIENT: 1681 require_Equal[string](t, ci.Name, "sub1") 1682 case GATEWAY: 1683 require_Equal[string](t, ci.Name, s2.Name()) 1684 if test.deliverMsg { 1685 require_Contains(t, ci.Error, errMsgTraceNoSupport) 1686 } else { 1687 require_Contains(t, ci.Error, errMsgTraceOnlyNoSupport) 1688 } 1689 default: 1690 t.Fatalf("Unexpected egress: %+v", ci) 1691 } 1692 } 1693 default: 1694 t.Fatalf("Unexpected ingress: %+v", ingress) 1695 } 1696 // We should not get a second trace 1697 if msg, err := traceSub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 1698 t.Fatalf("Did not expect other trace, got %s", msg.Data) 1699 } 1700 }) 1701 } 1702 } 1703 1704 func TestMsgTraceServiceImport(t *testing.T) { 1705 tmpl := ` 1706 listen: 127.0.0.1:-1 1707 accounts { 1708 A { 1709 users: [{user: a, password: pwd}] 1710 exports: [ { service: ">", allow_trace: %v} ] 1711 mappings = { 1712 bar: bozo 1713 } 1714 } 1715 B { 1716 users: [{user: b, password: pwd}] 1717 imports: [ { service: {account: "A", subject:">"} } ] 1718 exports: [ { service: ">", allow_trace: %v} ] 1719 } 1720 C { 1721 users: [{user: c, password: pwd}] 1722 exports: [ { service: ">", allow_trace: %v } ] 1723 } 1724 D { 1725 users: [{user: d, password: pwd}] 1726 imports: [ 1727 { service: {account: "B", subject:"bar"}, to: baz } 1728 { service: {account: "C", subject:">"} } 1729 ] 1730 mappings = { 1731 bat: baz 1732 } 1733 } 1734 } 1735 ` 1736 conf := createConfFile(t, []byte(fmt.Sprintf(tmpl, false, false, false))) 1737 s, _ := RunServerWithConfig(conf) 1738 defer s.Shutdown() 1739 1740 nc := natsConnect(t, s.ClientURL(), nats.UserInfo("d", "pwd"), nats.Name("Requestor")) 1741 defer nc.Close() 1742 1743 traceSub := natsSubSync(t, nc, "my.trace.subj") 1744 sub := natsSubSync(t, nc, "my.service.response.inbox") 1745 1746 nc2 := natsConnect(t, s.ClientURL(), nats.UserInfo("a", "pwd"), nats.Name("ServiceA")) 1747 defer 
nc2.Close() 1748 recv := int32(0) 1749 natsQueueSub(t, nc2, "*", "my_queue", func(m *nats.Msg) { 1750 atomic.AddInt32(&recv, 1) 1751 m.Respond(m.Data) 1752 }) 1753 natsFlush(t, nc2) 1754 1755 nc3 := natsConnect(t, s.ClientURL(), nats.UserInfo("c", "pwd"), nats.Name("ServiceC")) 1756 defer nc3.Close() 1757 natsSub(t, nc3, "baz", func(m *nats.Msg) { 1758 atomic.AddInt32(&recv, 1) 1759 m.Respond(m.Data) 1760 }) 1761 natsFlush(t, nc3) 1762 1763 for mainIter, mainTest := range []struct { 1764 name string 1765 allow bool 1766 }{ 1767 {"not allowed", false}, 1768 {"allowed", true}, 1769 {"not allowed again", false}, 1770 } { 1771 atomic.StoreInt32(&recv, 0) 1772 t.Run(mainTest.name, func(t *testing.T) { 1773 for _, test := range []struct { 1774 name string 1775 deliverMsg bool 1776 }{ 1777 {"just trace", false}, 1778 {"deliver msg", true}, 1779 } { 1780 t.Run(test.name, func(t *testing.T) { 1781 msg := nats.NewMsg("bat") 1782 msg.Header.Set(MsgTraceDest, traceSub.Subject) 1783 if !test.deliverMsg { 1784 msg.Header.Set(MsgTraceOnly, "true") 1785 } 1786 if !test.deliverMsg { 1787 msg.Data = []byte("request1") 1788 } else { 1789 msg.Data = []byte("request2") 1790 } 1791 msg.Reply = sub.Subject 1792 1793 err := nc.PublishMsg(msg) 1794 require_NoError(t, err) 1795 1796 if test.deliverMsg { 1797 for i := 0; i < 2; i++ { 1798 appMsg := natsNexMsg(t, sub, time.Second) 1799 require_Equal[string](t, string(appMsg.Data), "request2") 1800 } 1801 } 1802 // Check that no (more) messages are received. 1803 if msg, err := sub.NextMsg(100 * time.Millisecond); msg != nil || err != nats.ErrTimeout { 1804 t.Fatalf("Did not expect application message, got msg=%v err=%v", msg, err) 1805 } 1806 if !test.deliverMsg { 1807 // Just to make sure that message was not delivered to service 1808 // responders, wait a bit and check the recv value. 
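// With the Nats-Trace-Only header set, the server produces the trace
// events but never delivers the message itself, so neither ServiceA nor
// ServiceC should see it and the recv counter must stay at zero.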
1809 time.Sleep(50 * time.Millisecond) 1810 if n := atomic.LoadInt32(&recv); n != 0 { 1811 t.Fatalf("Expected no message to be received, but service callback fired %d times", n) 1812 } 1813 } 1814 1815 traceMsg := natsNexMsg(t, traceSub, time.Second) 1816 var e MsgTraceEvent 1817 json.Unmarshal(traceMsg.Data, &e) 1818 1819 require_Equal[string](t, e.Server.Name, s.Name()) 1820 ingress := e.Ingress() 1821 require_True(t, ingress != nil) 1822 require_True(t, ingress.Kind == CLIENT) 1823 require_Equal[string](t, ingress.Account, "D") 1824 require_Equal[string](t, ingress.Subject, "bat") 1825 sm := e.SubjectMapping() 1826 require_True(t, sm != nil) 1827 require_Equal[string](t, sm.MappedTo, "baz") 1828 simps := e.ServiceImports() 1829 require_True(t, simps != nil) 1830 var expectedServices int 1831 if mainTest.allow { 1832 expectedServices = 3 1833 } else { 1834 expectedServices = 2 1835 } 1836 require_Equal[int](t, len(simps), expectedServices) 1837 for _, si := range simps { 1838 require_True(t, si.Timestamp != time.Time{}) 1839 switch si.Account { 1840 case "C": 1841 require_Equal[string](t, si.From, "baz") 1842 require_Equal[string](t, si.To, "baz") 1843 case "B": 1844 require_Equal[string](t, si.From, "baz") 1845 require_Equal[string](t, si.To, "bar") 1846 case "A": 1847 if !mainTest.allow { 1848 t.Fatalf("Without allow_trace, we should not see service for account A") 1849 } 1850 require_Equal[string](t, si.From, "bar") 1851 require_Equal[string](t, si.To, "bozo") 1852 default: 1853 t.Fatalf("Unexpected account name: %s", si.Account) 1854 } 1855 } 1856 egress := e.Egresses() 1857 if !mainTest.allow { 1858 require_Equal[int](t, len(egress), 0) 1859 } else { 1860 require_Equal[int](t, len(egress), 2) 1861 var gotA, gotC bool 1862 for _, eg := range egress { 1863 // All Egress should be clients 1864 require_True(t, eg.Kind == CLIENT) 1865 // We should have one for ServiceA and one for ServiceC 1866 if eg.Name == "ServiceA" { 1867 require_Equal[string](t, eg.Account, "A") 1868 require_Equal[string](t, eg.Subscription, "*") 1869 require_Equal[string](t, eg.Queue, "my_queue") 1870 gotA = true 1871 } else if eg.Name == "ServiceC" { 1872 require_Equal[string](t, eg.Account, "C") 1873 require_Equal[string](t, eg.Queue, _EMPTY_) 1874 gotC = true 1875 } 1876 } 1877 if !gotA { 1878 t.Fatalf("Did not get Egress for serviceA: %+v", egress) 1879 } 1880 if !gotC { 1881 t.Fatalf("Did not get Egress for serviceC: %+v", egress) 1882 } 1883 } 1884 1885 // Make sure we properly remove the responses. 
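// Service imports create temporary response mappings for the
// request/reply round trip; once the exchange completes (or the message
// was trace-only), no response should remain pending on any of the
// accounts involved.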
1886 checkResp := func(an string) { 1887 acc, err := s.lookupAccount(an) 1888 require_NoError(t, err) 1889 checkFor(t, time.Second, 15*time.Millisecond, func() error { 1890 if n := acc.NumPendingAllResponses(); n != 0 { 1891 return fmt.Errorf("Still %d responses pending for account %q on server %s", n, acc, s) 1892 } 1893 return nil 1894 }) 1895 } 1896 for _, acc := range []string{"A", "B", "C", "D"} { 1897 checkResp(acc) 1898 } 1899 }) 1900 } 1901 switch mainIter { 1902 case 0: 1903 reloadUpdateConfig(t, s, conf, fmt.Sprintf(tmpl, true, true, true)) 1904 case 1: 1905 reloadUpdateConfig(t, s, conf, fmt.Sprintf(tmpl, false, false, false)) 1906 } 1907 }) 1908 } 1909 } 1910 1911 func TestMsgTraceServiceImportWithSuperCluster(t *testing.T) { 1912 for _, mainTest := range []struct { 1913 name string 1914 allowStr string 1915 allow bool 1916 }{ 1917 {"allowed", "true", true}, 1918 {"not allowed", "false", false}, 1919 } { 1920 t.Run(mainTest.name, func(t *testing.T) { 1921 tmpl := ` 1922 listen: 127.0.0.1:-1 1923 server_name: %s 1924 jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} 1925 1926 cluster { 1927 name: %s 1928 listen: 127.0.0.1:%d 1929 routes = [%s] 1930 } 1931 accounts { 1932 A { 1933 users: [{user: a, password: pwd}] 1934 exports: [ { service: ">", allow_trace: ` + mainTest.allowStr + ` } ] 1935 mappings = { 1936 bar: bozo 1937 } 1938 trace_dest: "a.trace.subj" 1939 } 1940 B { 1941 users: [{user: b, password: pwd}] 1942 imports: [ { service: {account: "A", subject:">"} } ] 1943 exports: [ { service: ">" , allow_trace: ` + mainTest.allowStr + ` } ] 1944 trace_dest: "b.trace.subj" 1945 } 1946 C { 1947 users: [{user: c, password: pwd}] 1948 exports: [ { service: ">" , allow_trace: ` + mainTest.allowStr + ` } ] 1949 trace_dest: "c.trace.subj" 1950 } 1951 D { 1952 users: [{user: d, password: pwd}] 1953 imports: [ 1954 { service: {account: "B", subject:"bar"}, to: baz } 1955 { service: {account: "C", subject:">"} } 1956 ] 1957 mappings = { 1958 bat: baz 1959 } 1960 trace_dest: "d.trace.subj" 1961 } 1962 $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } 1963 } 1964 ` 1965 sc := createJetStreamSuperClusterWithTemplate(t, tmpl, 3, 2) 1966 defer sc.shutdown() 1967 1968 sfornc := sc.clusters[0].servers[0] 1969 nc := natsConnect(t, sfornc.ClientURL(), nats.UserInfo("d", "pwd"), nats.Name("Requestor")) 1970 defer nc.Close() 1971 1972 traceSub := natsSubSync(t, nc, "my.trace.subj") 1973 sub := natsSubSync(t, nc, "my.service.response.inbox") 1974 1975 sfornc2 := sc.clusters[0].servers[1] 1976 nc2 := natsConnect(t, sfornc2.ClientURL(), nats.UserInfo("a", "pwd"), nats.Name("ServiceA")) 1977 defer nc2.Close() 1978 subSvcA := natsQueueSubSync(t, nc2, "*", "my_queue") 1979 natsFlush(t, nc2) 1980 1981 sfornc3 := sc.clusters[1].servers[0] 1982 nc3 := natsConnect(t, sfornc3.ClientURL(), nats.UserInfo("c", "pwd"), nats.Name("ServiceC")) 1983 defer nc3.Close() 1984 subSvcC := natsSubSync(t, nc3, "baz") 1985 natsFlush(t, nc3) 1986 1987 // Create a subscription for each account trace destination to make 1988 // sure that we are not sending it there. 
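// Each account in the template above also has its own trace_dest
// configured (a.trace.subj, b.trace.subj, etc.). When a message carries
// an explicit Nats-Trace-Dest header, the events must go only to that
// subject, never to the per-account destinations, which is what these
// subscriptions verify at the end of the test.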
1989 var accSubs []*nats.Subscription 1990 for _, user := range []string{"a", "b", "c", "d"} { 1991 nc := natsConnect(t, sfornc.ClientURL(), nats.UserInfo(user, "pwd")) 1992 defer nc.Close() 1993 accSubs = append(accSubs, natsSubSync(t, nc, user+".trace.subj")) 1994 } 1995 1996 for _, test := range []struct { 1997 name string 1998 deliverMsg bool 1999 }{ 2000 {"just trace", false}, 2001 {"deliver msg", true}, 2002 } { 2003 t.Run(test.name, func(t *testing.T) { 2004 msg := nats.NewMsg("bat") 2005 msg.Header.Set(MsgTraceDest, traceSub.Subject) 2006 if !test.deliverMsg { 2007 msg.Header.Set(MsgTraceOnly, "true") 2008 } 2009 // We add the traceParentHdr header to make sure that it is 2010 // deactivated in addition to the Nats-Trace-Dest header too 2011 // when needed. 2012 traceParentHdrVal := "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01" 2013 msg.Header.Set(traceParentHdr, traceParentHdrVal) 2014 if !test.deliverMsg { 2015 msg.Data = []byte("request1") 2016 } else { 2017 msg.Data = []byte("request2") 2018 } 2019 msg.Reply = sub.Subject 2020 2021 err := nc.PublishMsg(msg) 2022 require_NoError(t, err) 2023 2024 if test.deliverMsg { 2025 processSvc := func(sub *nats.Subscription) { 2026 t.Helper() 2027 appMsg := natsNexMsg(t, sub, time.Second) 2028 // This test causes a message to be routed to the 2029 // service responders. When not allowing, we need 2030 // to make sure that the trace header has been 2031 // disabled. Not receiving the trace event from 2032 // the remote is not enough to verify since the 2033 // trace would not reach the origin server because 2034 // the origin account header will not be present. 2035 if mainTest.allow { 2036 if hv := appMsg.Header.Get(MsgTraceDest); hv != traceSub.Subject { 2037 t.Fatalf("Expecting header with %q, but got %q", traceSub.Subject, hv) 2038 } 2039 if hv := appMsg.Header.Get(traceParentHdr); hv != traceParentHdrVal { 2040 t.Fatalf("Expecting header with %q, but got %q", traceParentHdrVal, hv) 2041 } 2042 } else { 2043 if hv := appMsg.Header.Get(MsgTraceDest); hv != _EMPTY_ { 2044 t.Fatalf("Expecting no header, but header was present with value: %q", hv) 2045 } 2046 if hv := appMsg.Header.Get(traceParentHdr); hv != _EMPTY_ { 2047 t.Fatalf("Expecting no header, but header was present with value: %q", hv) 2048 } 2049 // We don't really need to check that, but we 2050 // should see the header with the first letter 2051 // being an `X`. 2052 hnb := []byte(MsgTraceDest) 2053 hnb[0] = 'X' 2054 hn := string(hnb) 2055 if hv := appMsg.Header.Get(hn); hv != traceSub.Subject { 2056 t.Fatalf("Expected header %q to be %q, got %q", hn, traceSub.Subject, hv) 2057 } 2058 hnb = []byte(traceParentHdr) 2059 hnb[0] = 'X' 2060 hn = string(hnb) 2061 if hv := appMsg.Header.Get(hn); hv != traceParentHdrVal { 2062 t.Fatalf("Expected header %q to be %q, got %q", hn, traceParentHdrVal, hv) 2063 } 2064 } 2065 appMsg.Respond(appMsg.Data) 2066 } 2067 processSvc(subSvcA) 2068 processSvc(subSvcC) 2069 2070 for i := 0; i < 2; i++ { 2071 appMsg := natsNexMsg(t, sub, time.Second) 2072 require_Equal[string](t, string(appMsg.Data), "request2") 2073 } 2074 } 2075 // Check that no (more) messages are received. 2076 if msg, err := sub.NextMsg(100 * time.Millisecond); msg != nil || err != nats.ErrTimeout { 2077 t.Fatalf("Did not expect application message, got msg=%v err=%v", msg, err) 2078 } 2079 if !test.deliverMsg { 2080 // Just to make sure that message was not delivered to service 2081 // responders, wait a bit and check the recv value. 
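// (In this variant the responders are plain synchronous subscriptions,
// so we simply verify below that neither subSvcA nor subSvcC received
// anything.)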
2082 time.Sleep(50 * time.Millisecond) 2083 for _, sub := range []*nats.Subscription{subSvcA, subSvcC} { 2084 if msg, err := sub.NextMsg(250 * time.Millisecond); err == nil { 2085 t.Fatalf("Expected no message to be received, but service subscription got %s", msg.Data) 2086 } 2087 } 2088 } 2089 2090 check := func() { 2091 traceMsg := natsNexMsg(t, traceSub, time.Second) 2092 var e MsgTraceEvent 2093 json.Unmarshal(traceMsg.Data, &e) 2094 2095 ingress := e.Ingress() 2096 require_True(t, ingress != nil) 2097 switch ingress.Kind { 2098 case CLIENT: 2099 require_Equal[string](t, e.Server.Name, sfornc.Name()) 2100 require_Equal[string](t, ingress.Account, "D") 2101 require_Equal[string](t, ingress.Subject, "bat") 2102 sm := e.SubjectMapping() 2103 require_True(t, sm != nil) 2104 require_Equal[string](t, sm.MappedTo, "baz") 2105 simps := e.ServiceImports() 2106 require_True(t, simps != nil) 2107 var expectedServices int 2108 if mainTest.allow { 2109 expectedServices = 3 2110 } else { 2111 expectedServices = 2 2112 } 2113 require_Equal[int](t, len(simps), expectedServices) 2114 for _, si := range simps { 2115 switch si.Account { 2116 case "C": 2117 require_Equal[string](t, si.From, "baz") 2118 require_Equal[string](t, si.To, "baz") 2119 case "B": 2120 require_Equal[string](t, si.From, "baz") 2121 require_Equal[string](t, si.To, "bar") 2122 case "A": 2123 if !mainTest.allow { 2124 t.Fatalf("Without allow_trace, we should not see service for account A") 2125 } 2126 require_Equal[string](t, si.From, "bar") 2127 require_Equal[string](t, si.To, "bozo") 2128 default: 2129 t.Fatalf("Unexpected account name: %s", si.Account) 2130 } 2131 } 2132 egress := e.Egresses() 2133 if !mainTest.allow { 2134 require_Equal[int](t, len(egress), 0) 2135 } else { 2136 require_Equal[int](t, len(egress), 2) 2137 for _, eg := range egress { 2138 switch eg.Kind { 2139 case ROUTER: 2140 require_Equal[string](t, eg.Name, sfornc2.Name()) 2141 require_Equal[string](t, eg.Account, _EMPTY_) 2142 case GATEWAY: 2143 require_Equal[string](t, eg.Name, sfornc3.Name()) 2144 require_Equal[string](t, eg.Account, _EMPTY_) 2145 } 2146 } 2147 } 2148 case ROUTER: 2149 require_Equal[string](t, e.Server.Name, sfornc2.Name()) 2150 require_Equal[string](t, ingress.Account, "A") 2151 require_Equal[string](t, ingress.Subject, "bozo") 2152 egress := e.Egresses() 2153 require_Equal[int](t, len(egress), 1) 2154 eg := egress[0] 2155 require_True(t, eg.Kind == CLIENT) 2156 require_Equal[string](t, eg.Name, "ServiceA") 2157 require_Equal[string](t, eg.Account, _EMPTY_) 2158 require_Equal[string](t, eg.Subscription, "*") 2159 require_Equal[string](t, eg.Queue, "my_queue") 2160 case GATEWAY: 2161 require_Equal[string](t, e.Server.Name, sfornc3.Name()) 2162 require_Equal[string](t, ingress.Account, "C") 2163 require_Equal[string](t, ingress.Subject, "baz") 2164 egress := e.Egresses() 2165 require_Equal[int](t, len(egress), 1) 2166 eg := egress[0] 2167 require_True(t, eg.Kind == CLIENT) 2168 require_Equal[string](t, eg.Name, "ServiceC") 2169 require_Equal[string](t, eg.Account, _EMPTY_) 2170 require_Equal[string](t, eg.Subscription, "baz") 2171 require_Equal[string](t, eg.Queue, _EMPTY_) 2172 default: 2173 t.Fatalf("Unexpected ingress: %+v", ingress) 2174 } 2175 } 2176 // We should receive 3 events when allowed, a single when not. 
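// The three events come from the origin server (CLIENT ingress), the
// routed server hosting ServiceA (ROUTER ingress), and the server in
// the other cluster hosting ServiceC (GATEWAY ingress). When
// allow_trace is false, the trace headers are deactivated before the
// message crosses into the exporting account, so only the origin event
// is emitted.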
2177 check() 2178 if mainTest.allow { 2179 check() 2180 check() 2181 } 2182 // Make sure we are not receiving more traces 2183 if tm, err := traceSub.NextMsg(250 * time.Millisecond); err == nil { 2184 t.Fatalf("Should not have received trace message: %s", tm.Data) 2185 } 2186 // Make sure that we never receive on any of the account 2187 // trace destination's sub. 2188 for _, sub := range accSubs { 2189 if tm, err := sub.NextMsg(100 * time.Millisecond); err == nil { 2190 t.Fatalf("Should not have received trace message on account's trace sub, got %s", tm.Data) 2191 } 2192 } 2193 // Make sure we properly remove the responses. 2194 checkResp := func(an string) { 2195 for _, s := range []*Server{sfornc, sfornc2, sfornc3} { 2196 acc, err := s.lookupAccount(an) 2197 require_NoError(t, err) 2198 checkFor(t, time.Second, 15*time.Millisecond, func() error { 2199 if n := acc.NumPendingAllResponses(); n != 0 { 2200 return fmt.Errorf("Still %d responses pending for account %q on server %s", n, acc, s) 2201 } 2202 return nil 2203 }) 2204 } 2205 } 2206 for _, acc := range []string{"A", "B", "C", "D"} { 2207 checkResp(acc) 2208 } 2209 }) 2210 } 2211 }) 2212 } 2213 } 2214 2215 func TestMsgTraceServiceImportWithLeafNodeHub(t *testing.T) { 2216 confHub := createConfFile(t, []byte(` 2217 listen: 127.0.0.1:-1 2218 server_name: "S1" 2219 accounts { 2220 A { 2221 users: [{user: a, password: pwd}] 2222 exports: [ { service: ">", allow_trace: true } ] 2223 mappings = { 2224 bar: bozo 2225 } 2226 } 2227 B { 2228 users: [{user: b, password: pwd}] 2229 imports: [ { service: {account: "A", subject:">"} } ] 2230 exports: [ { service: ">", allow_trace: true } ] 2231 } 2232 C { 2233 users: [{user: c, password: pwd}] 2234 exports: [ { service: ">", allow_trace: true } ] 2235 } 2236 D { 2237 users: [{user: d, password: pwd}] 2238 imports: [ 2239 { service: {account: "B", subject:"bar"}, to: baz } 2240 { service: {account: "C", subject:">"} } 2241 ] 2242 mappings = { 2243 bat: baz 2244 } 2245 } 2246 $SYS { users = [ { user: "admin", pass: "s3cr3t!" 
} ] } 2247 } 2248 leafnodes { 2249 port: -1 2250 } 2251 `)) 2252 hub, ohub := RunServerWithConfig(confHub) 2253 defer hub.Shutdown() 2254 2255 confLeaf := createConfFile(t, []byte(fmt.Sprintf(` 2256 listen: 127.0.0.1:-1 2257 server_name: "S2" 2258 leafnodes { 2259 remotes [{url: "nats://d:pwd@127.0.0.1:%d"}] 2260 } 2261 `, ohub.LeafNode.Port))) 2262 leaf, _ := RunServerWithConfig(confLeaf) 2263 defer leaf.Shutdown() 2264 2265 checkLeafNodeConnectedCount(t, hub, 1) 2266 checkLeafNodeConnectedCount(t, leaf, 1) 2267 2268 nc2 := natsConnect(t, hub.ClientURL(), nats.UserInfo("a", "pwd"), nats.Name("ServiceA")) 2269 defer nc2.Close() 2270 recv := int32(0) 2271 natsQueueSub(t, nc2, "*", "my_queue", func(m *nats.Msg) { 2272 atomic.AddInt32(&recv, 1) 2273 m.Respond(m.Data) 2274 }) 2275 natsFlush(t, nc2) 2276 2277 nc3 := natsConnect(t, hub.ClientURL(), nats.UserInfo("c", "pwd"), nats.Name("ServiceC")) 2278 defer nc3.Close() 2279 natsSub(t, nc3, "baz", func(m *nats.Msg) { 2280 atomic.AddInt32(&recv, 1) 2281 m.Respond(m.Data) 2282 }) 2283 natsFlush(t, nc3) 2284 2285 nc := natsConnect(t, leaf.ClientURL(), nats.Name("Requestor")) 2286 defer nc.Close() 2287 2288 traceSub := natsSubSync(t, nc, "my.trace.subj") 2289 sub := natsSubSync(t, nc, "my.service.response.inbox") 2290 2291 checkSubInterest(t, leaf, globalAccountName, "bat", time.Second) 2292 2293 for _, test := range []struct { 2294 name string 2295 deliverMsg bool 2296 }{ 2297 {"just trace", false}, 2298 {"deliver msg", true}, 2299 } { 2300 t.Run(test.name, func(t *testing.T) { 2301 msg := nats.NewMsg("bat") 2302 msg.Header.Set(MsgTraceDest, traceSub.Subject) 2303 if !test.deliverMsg { 2304 msg.Header.Set(MsgTraceOnly, "true") 2305 } 2306 if !test.deliverMsg { 2307 msg.Data = []byte("request1") 2308 } else { 2309 msg.Data = []byte("request2") 2310 } 2311 msg.Reply = sub.Subject 2312 2313 err := nc.PublishMsg(msg) 2314 require_NoError(t, err) 2315 2316 if test.deliverMsg { 2317 for i := 0; i < 2; i++ { 2318 appMsg := natsNexMsg(t, sub, time.Second) 2319 require_Equal[string](t, string(appMsg.Data), "request2") 2320 } 2321 } 2322 // Check that no (more) messages are received. 2323 if msg, err := sub.NextMsg(100 * time.Millisecond); msg != nil || err != nats.ErrTimeout { 2324 t.Fatalf("Did not expect application message, got msg=%v err=%v", msg, err) 2325 } 2326 if !test.deliverMsg { 2327 // Just to make sure that message was not delivered to service 2328 // responders, wait a bit and check the recv value. 
2329 time.Sleep(50 * time.Millisecond) 2330 if n := atomic.LoadInt32(&recv); n != 0 { 2331 t.Fatalf("Expected no message to be received, but service callback fired %d times", n) 2332 } 2333 } 2334 2335 check := func() { 2336 traceMsg := natsNexMsg(t, traceSub, time.Second) 2337 var e MsgTraceEvent 2338 json.Unmarshal(traceMsg.Data, &e) 2339 2340 ingress := e.Ingress() 2341 require_True(t, ingress != nil) 2342 switch ingress.Kind { 2343 case CLIENT: 2344 require_Equal[string](t, e.Server.Name, "S2") 2345 require_Equal[string](t, ingress.Account, globalAccountName) 2346 require_Equal[string](t, ingress.Subject, "bat") 2347 require_True(t, e.SubjectMapping() == nil) 2348 egress := e.Egresses() 2349 require_Equal[int](t, len(egress), 1) 2350 eg := egress[0] 2351 require_True(t, eg.Kind == LEAF) 2352 require_Equal[string](t, eg.Name, "S1") 2353 require_Equal[string](t, eg.Account, _EMPTY_) 2354 case LEAF: 2355 require_Equal[string](t, e.Server.Name, hub.Name()) 2356 require_Equal[string](t, ingress.Name, leaf.Name()) 2357 require_Equal[string](t, ingress.Account, "D") 2358 require_Equal[string](t, ingress.Subject, "bat") 2359 sm := e.SubjectMapping() 2360 require_True(t, sm != nil) 2361 require_Equal[string](t, sm.MappedTo, "baz") 2362 simps := e.ServiceImports() 2363 require_True(t, simps != nil) 2364 require_Equal[int](t, len(simps), 3) 2365 for _, si := range simps { 2366 switch si.Account { 2367 case "C": 2368 require_Equal[string](t, si.From, "baz") 2369 require_Equal[string](t, si.To, "baz") 2370 case "B": 2371 require_Equal[string](t, si.From, "baz") 2372 require_Equal[string](t, si.To, "bar") 2373 case "A": 2374 require_Equal[string](t, si.From, "bar") 2375 require_Equal[string](t, si.To, "bozo") 2376 default: 2377 t.Fatalf("Unexpected account name: %s", si.Account) 2378 } 2379 } 2380 egress := e.Egresses() 2381 require_Equal[int](t, len(egress), 2) 2382 for _, eg := range egress { 2383 require_True(t, eg.Kind == CLIENT) 2384 switch eg.Account { 2385 case "C": 2386 require_Equal[string](t, eg.Name, "ServiceC") 2387 require_Equal[string](t, eg.Subscription, "baz") 2388 require_Equal[string](t, eg.Queue, _EMPTY_) 2389 case "A": 2390 require_Equal[string](t, eg.Name, "ServiceA") 2391 require_Equal[string](t, eg.Subscription, "*") 2392 require_Equal[string](t, eg.Queue, "my_queue") 2393 default: 2394 t.Fatalf("Unexpected egress: %+v", eg) 2395 } 2396 } 2397 default: 2398 t.Fatalf("Unexpected ingress: %+v", ingress) 2399 } 2400 } 2401 // We should receive 2 events. 2402 check() 2403 check() 2404 // Make sure we are not receiving more traces 2405 if tm, err := traceSub.NextMsg(250 * time.Millisecond); err == nil { 2406 t.Fatalf("Should not have received trace message: %s", tm.Data) 2407 } 2408 2409 // Make sure we properly remove the responses. 
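// The service imports are defined on the hub in this setup, so the
// pending responses are tracked there; we check all four accounts on
// the hub server.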
2410 checkResp := func(an string) { 2411 acc, err := hub.lookupAccount(an) 2412 require_NoError(t, err) 2413 checkFor(t, time.Second, 15*time.Millisecond, func() error { 2414 if n := acc.NumPendingAllResponses(); n != 0 { 2415 return fmt.Errorf("Still %d responses for account %q pending on %s", n, an, hub) 2416 } 2417 return nil 2418 }) 2419 } 2420 for _, acc := range []string{"A", "B", "C", "D"} { 2421 checkResp(acc) 2422 } 2423 }) 2424 } 2425 } 2426 2427 func TestMsgTraceServiceImportWithLeafNodeLeaf(t *testing.T) { 2428 confHub := createConfFile(t, []byte(` 2429 listen: 127.0.0.1:-1 2430 server_name: "S1" 2431 accounts { 2432 A { 2433 users: [{user: a, password: pwd}] 2434 exports: [ { service: "bar", allow_trace: true } ] 2435 } 2436 B { 2437 users: [{user: b, password: pwd}] 2438 imports: [{ service: {account: "A", subject:"bar"}, to: baz }] 2439 } 2440 $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } 2441 } 2442 leafnodes { 2443 port: -1 2444 } 2445 `)) 2446 hub, ohub := RunServerWithConfig(confHub) 2447 defer hub.Shutdown() 2448 2449 confLeaf := createConfFile(t, []byte(fmt.Sprintf(` 2450 listen: 127.0.0.1:-1 2451 server_name: "S2" 2452 accounts { 2453 A { 2454 users: [{user: a, password: pwd}] 2455 exports: [ { service: "bar"} ] 2456 } 2457 B { users: [{user: b, password: pwd}] } 2458 $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } 2459 } 2460 leafnodes { 2461 remotes [ 2462 { 2463 url: "nats://a:pwd@127.0.0.1:%d" 2464 account: A 2465 } 2466 { 2467 url: "nats://b:pwd@127.0.0.1:%d" 2468 account: B 2469 } 2470 ] 2471 } 2472 `, ohub.LeafNode.Port, ohub.LeafNode.Port))) 2473 leaf, _ := RunServerWithConfig(confLeaf) 2474 defer leaf.Shutdown() 2475 2476 checkLeafNodeConnectedCount(t, hub, 2) 2477 checkLeafNodeConnectedCount(t, leaf, 2) 2478 2479 nc2 := natsConnect(t, leaf.ClientURL(), nats.UserInfo("a", "pwd"), nats.Name("ServiceA")) 2480 defer nc2.Close() 2481 recv := int32(0) 2482 natsQueueSub(t, nc2, "*", "my_queue", func(m *nats.Msg) { 2483 atomic.AddInt32(&recv, 1) 2484 m.Respond(m.Data) 2485 }) 2486 natsFlush(t, nc2) 2487 2488 nc := natsConnect(t, hub.ClientURL(), nats.UserInfo("b", "pwd"), nats.Name("Requestor")) 2489 defer nc.Close() 2490 2491 traceSub := natsSubSync(t, nc, "my.trace.subj") 2492 sub := natsSubSync(t, nc, "my.service.response.inbox") 2493 2494 // Check that hub has a subscription interest on "baz" 2495 checkSubInterest(t, hub, "A", "baz", time.Second) 2496 // And check that the leaf has the sub interest on the trace subject 2497 checkSubInterest(t, leaf, "B", traceSub.Subject, time.Second) 2498 2499 for _, test := range []struct { 2500 name string 2501 deliverMsg bool 2502 }{ 2503 {"just trace", false}, 2504 {"deliver msg", true}, 2505 } { 2506 t.Run(test.name, func(t *testing.T) { 2507 msg := nats.NewMsg("baz") 2508 msg.Header.Set(MsgTraceDest, traceSub.Subject) 2509 if !test.deliverMsg { 2510 msg.Header.Set(MsgTraceOnly, "true") 2511 } 2512 if !test.deliverMsg { 2513 msg.Data = []byte("request1") 2514 } else { 2515 msg.Data = []byte("request2") 2516 } 2517 msg.Reply = sub.Subject 2518 2519 err := nc.PublishMsg(msg) 2520 require_NoError(t, err) 2521 2522 if test.deliverMsg { 2523 appMsg := natsNexMsg(t, sub, time.Second) 2524 require_Equal[string](t, string(appMsg.Data), "request2") 2525 } 2526 // Check that no (more) messages are received. 
2527 if msg, err := sub.NextMsg(100 * time.Millisecond); msg != nil || err != nats.ErrTimeout { 2528 t.Fatalf("Did not expect application message, got msg=%v err=%v", msg, err) 2529 } 2530 if !test.deliverMsg { 2531 // Just to make sure that message was not delivered to service 2532 // responders, wait a bit and check the recv value. 2533 time.Sleep(50 * time.Millisecond) 2534 if n := atomic.LoadInt32(&recv); n != 0 { 2535 t.Fatalf("Expected no message to be received, but service callback fired %d times", n) 2536 } 2537 } 2538 2539 check := func() { 2540 traceMsg := natsNexMsg(t, traceSub, time.Second) 2541 var e MsgTraceEvent 2542 json.Unmarshal(traceMsg.Data, &e) 2543 2544 ingress := e.Ingress() 2545 require_True(t, ingress != nil) 2546 2547 switch ingress.Kind { 2548 case CLIENT: 2549 require_Equal[string](t, e.Server.Name, "S1") 2550 require_Equal[string](t, ingress.Name, "Requestor") 2551 require_Equal[string](t, ingress.Account, "B") 2552 require_Equal[string](t, ingress.Subject, "baz") 2553 require_True(t, e.SubjectMapping() == nil) 2554 simps := e.ServiceImports() 2555 require_True(t, simps != nil) 2556 require_Equal[int](t, len(simps), 1) 2557 si := simps[0] 2558 require_Equal[string](t, si.Account, "A") 2559 require_Equal[string](t, si.From, "baz") 2560 require_Equal[string](t, si.To, "bar") 2561 egress := e.Egresses() 2562 require_Equal[int](t, len(egress), 1) 2563 eg := egress[0] 2564 require_True(t, eg.Kind == LEAF) 2565 require_Equal[string](t, eg.Name, "S2") 2566 require_Equal[string](t, eg.Account, "A") 2567 require_Equal[string](t, eg.Subscription, _EMPTY_) 2568 case LEAF: 2569 require_Equal[string](t, e.Server.Name, leaf.Name()) 2570 require_Equal[string](t, ingress.Name, hub.Name()) 2571 require_Equal[string](t, ingress.Account, "A") 2572 require_Equal[string](t, ingress.Subject, "bar") 2573 require_True(t, e.SubjectMapping() == nil) 2574 require_True(t, e.ServiceImports() == nil) 2575 egress := e.Egresses() 2576 require_Equal[int](t, len(egress), 1) 2577 eg := egress[0] 2578 require_True(t, eg.Kind == CLIENT) 2579 require_Equal[string](t, eg.Name, "ServiceA") 2580 require_Equal[string](t, eg.Subscription, "*") 2581 require_Equal[string](t, eg.Queue, "my_queue") 2582 default: 2583 t.Fatalf("Unexpected ingress: %+v", ingress) 2584 } 2585 } 2586 // We should receive 2 events. 2587 check() 2588 check() 2589 // Make sure we are not receiving more traces 2590 if tm, err := traceSub.NextMsg(250 * time.Millisecond); err == nil { 2591 t.Fatalf("Should not have received trace message: %s", tm.Data) 2592 } 2593 2594 // Make sure we properly remove the responses. 
2595 checkResp := func(an string) { 2596 acc, err := leaf.lookupAccount(an) 2597 require_NoError(t, err) 2598 checkFor(t, time.Second, 15*time.Millisecond, func() error { 2599 if n := acc.NumPendingAllResponses(); n != 0 { 2600 return fmt.Errorf("Still %d responses for account %q pending on %s", n, an, leaf) 2601 } 2602 return nil 2603 }) 2604 } 2605 for _, acc := range []string{"A", "B"} { 2606 checkResp(acc) 2607 } 2608 }) 2609 } 2610 } 2611 2612 func TestMsgTraceStreamExport(t *testing.T) { 2613 tmpl := ` 2614 listen: 127.0.0.1:-1 2615 accounts { 2616 A { 2617 users: [{user: a, password: pwd}] 2618 exports: [ 2619 { stream: "info.*.*.>"} 2620 ] 2621 } 2622 B { 2623 users: [{user: b, password: pwd}] 2624 imports: [ { stream: {account: "A", subject:"info.*.*.>"}, to: "B.info.$2.$1.>", allow_trace: %v } ] 2625 } 2626 C { 2627 users: [{user: c, password: pwd}] 2628 imports: [ { stream: {account: "A", subject:"info.*.*.>"}, to: "C.info.$1.$2.>", allow_trace: %v } ] 2629 } 2630 } 2631 ` 2632 conf := createConfFile(t, []byte(fmt.Sprintf(tmpl, false, false))) 2633 s, _ := RunServerWithConfig(conf) 2634 defer s.Shutdown() 2635 2636 nc := natsConnect(t, s.ClientURL(), nats.UserInfo("a", "pwd"), nats.Name("Tracer")) 2637 defer nc.Close() 2638 traceSub := natsSubSync(t, nc, "my.trace.subj") 2639 2640 nc2 := natsConnect(t, s.ClientURL(), nats.UserInfo("b", "pwd"), nats.Name("sub1")) 2641 defer nc2.Close() 2642 sub1 := natsSubSync(t, nc2, "B.info.*.*.>") 2643 natsFlush(t, nc2) 2644 2645 nc3 := natsConnect(t, s.ClientURL(), nats.UserInfo("c", "pwd"), nats.Name("sub2")) 2646 defer nc3.Close() 2647 sub2 := natsQueueSubSync(t, nc3, "C.info.>", "my_queue") 2648 natsFlush(t, nc3) 2649 2650 for mainIter, mainTest := range []struct { 2651 name string 2652 allow bool 2653 }{ 2654 {"not allowed", false}, 2655 {"allowed", true}, 2656 {"not allowed again", false}, 2657 } { 2658 t.Run(mainTest.name, func(t *testing.T) { 2659 for _, test := range []struct { 2660 name string 2661 deliverMsg bool 2662 }{ 2663 {"just trace", false}, 2664 {"deliver msg", true}, 2665 } { 2666 t.Run(test.name, func(t *testing.T) { 2667 msg := nats.NewMsg("info.11.22.bar") 2668 msg.Header.Set(MsgTraceDest, traceSub.Subject) 2669 if !test.deliverMsg { 2670 msg.Header.Set(MsgTraceOnly, "true") 2671 } 2672 msg.Data = []byte("hello") 2673 2674 err := nc.PublishMsg(msg) 2675 require_NoError(t, err) 2676 2677 if test.deliverMsg { 2678 appMsg := natsNexMsg(t, sub1, time.Second) 2679 require_Equal[string](t, appMsg.Subject, "B.info.22.11.bar") 2680 appMsg = natsNexMsg(t, sub2, time.Second) 2681 require_Equal[string](t, appMsg.Subject, "C.info.11.22.bar") 2682 } 2683 // Check that no (more) messages are received. 
2684 for _, sub := range []*nats.Subscription{sub1, sub2} { 2685 if msg, err := sub.NextMsg(100 * time.Millisecond); msg != nil || err != nats.ErrTimeout { 2686 t.Fatalf("Did not expect application message, got msg=%v err=%v", msg, err) 2687 } 2688 } 2689 2690 traceMsg := natsNexMsg(t, traceSub, time.Second) 2691 var e MsgTraceEvent 2692 json.Unmarshal(traceMsg.Data, &e) 2693 2694 require_Equal[string](t, e.Server.Name, s.Name()) 2695 ingress := e.Ingress() 2696 require_True(t, ingress != nil) 2697 require_True(t, ingress.Kind == CLIENT) 2698 require_Equal[string](t, ingress.Account, "A") 2699 require_Equal[string](t, ingress.Subject, "info.11.22.bar") 2700 require_True(t, e.SubjectMapping() == nil) 2701 require_True(t, e.ServiceImports() == nil) 2702 stexps := e.StreamExports() 2703 require_True(t, stexps != nil) 2704 require_Equal[int](t, len(stexps), 2) 2705 for _, se := range stexps { 2706 require_True(t, se.Timestamp != time.Time{}) 2707 switch se.Account { 2708 case "B": 2709 require_Equal[string](t, se.To, "B.info.22.11.bar") 2710 case "C": 2711 require_Equal[string](t, se.To, "C.info.11.22.bar") 2712 default: 2713 t.Fatalf("Unexpected stream export: %+v", se) 2714 } 2715 } 2716 egress := e.Egresses() 2717 if mainTest.allow { 2718 require_Equal[int](t, len(egress), 2) 2719 for _, eg := range egress { 2720 require_True(t, eg.Kind == CLIENT) 2721 switch eg.Account { 2722 case "B": 2723 require_Equal[string](t, eg.Name, "sub1") 2724 require_Equal[string](t, eg.Subscription, "info.*.*.>") 2725 require_Equal[string](t, eg.Queue, _EMPTY_) 2726 case "C": 2727 require_Equal[string](t, eg.Name, "sub2") 2728 require_Equal[string](t, eg.Subscription, "info.*.*.>") 2729 require_Equal[string](t, eg.Queue, "my_queue") 2730 default: 2731 t.Fatalf("Unexpected egress: %+v", eg) 2732 } 2733 } 2734 } else { 2735 require_Equal[int](t, len(egress), 0) 2736 } 2737 }) 2738 } 2739 switch mainIter { 2740 case 0: 2741 reloadUpdateConfig(t, s, conf, fmt.Sprintf(tmpl, true, true)) 2742 case 1: 2743 reloadUpdateConfig(t, s, conf, fmt.Sprintf(tmpl, false, false)) 2744 } 2745 }) 2746 } 2747 } 2748 2749 func TestMsgTraceStreamExportWithSuperCluster(t *testing.T) { 2750 for _, mainTest := range []struct { 2751 name string 2752 allowStr string 2753 allow bool 2754 }{ 2755 {"allowed", "true", true}, 2756 {"not allowed", "false", false}, 2757 } { 2758 t.Run(mainTest.name, func(t *testing.T) { 2759 tmpl := ` 2760 listen: 127.0.0.1:-1 2761 server_name: %s 2762 jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} 2763 2764 cluster { 2765 name: %s 2766 listen: 127.0.0.1:%d 2767 routes = [%s] 2768 } 2769 accounts { 2770 A { 2771 users: [{user: a, password: pwd}] 2772 exports: [ 2773 { stream: "info.*.*.>"} 2774 ] 2775 } 2776 B { 2777 users: [{user: b, password: pwd}] 2778 imports: [ { stream: {account: "A", subject:"info.*.*.>"}, to: "B.info.$2.$1.>", allow_trace: ` + mainTest.allowStr + ` } ] 2779 } 2780 C { 2781 users: [{user: c, password: pwd}] 2782 imports: [ { stream: {account: "A", subject:"info.*.*.>"}, to: "C.info.$1.$2.>", allow_trace: ` + mainTest.allowStr + ` } ] 2783 } 2784 $SYS { users = [ { user: "admin", pass: "s3cr3t!" 
} ] } 2785 } 2786 ` 2787 sc := createJetStreamSuperClusterWithTemplate(t, tmpl, 2, 2) 2788 defer sc.shutdown() 2789 2790 sfornc := sc.clusters[0].servers[0] 2791 nc := natsConnect(t, sfornc.ClientURL(), nats.UserInfo("a", "pwd"), nats.Name("Tracer")) 2792 defer nc.Close() 2793 traceSub := natsSubSync(t, nc, "my.trace.subj") 2794 2795 sfornc2 := sc.clusters[0].servers[1] 2796 nc2 := natsConnect(t, sfornc2.ClientURL(), nats.UserInfo("b", "pwd"), nats.Name("sub1")) 2797 defer nc2.Close() 2798 sub1 := natsSubSync(t, nc2, "B.info.*.*.>") 2799 natsFlush(t, nc2) 2800 checkSubInterest(t, sfornc2, "A", traceSub.Subject, time.Second) 2801 2802 sfornc3 := sc.clusters[1].servers[0] 2803 nc3 := natsConnect(t, sfornc3.ClientURL(), nats.UserInfo("c", "pwd"), nats.Name("sub2")) 2804 defer nc3.Close() 2805 sub2 := natsQueueSubSync(t, nc3, "C.info.>", "my_queue") 2806 natsFlush(t, nc3) 2807 2808 checkSubInterest(t, sfornc, "A", "info.1.2.3.4", time.Second) 2809 for _, s := range sc.clusters[0].servers { 2810 checkForRegisteredQSubInterest(t, s, "C2", "A", "info.1.2.3", 1, time.Second) 2811 } 2812 2813 for _, test := range []struct { 2814 name string 2815 deliverMsg bool 2816 }{ 2817 {"just trace", false}, 2818 {"deliver msg", true}, 2819 } { 2820 t.Run(test.name, func(t *testing.T) { 2821 msg := nats.NewMsg("info.11.22.bar") 2822 msg.Header.Set(MsgTraceDest, traceSub.Subject) 2823 if !test.deliverMsg { 2824 msg.Header.Set(MsgTraceOnly, "true") 2825 } 2826 msg.Data = []byte("hello") 2827 2828 err := nc.PublishMsg(msg) 2829 require_NoError(t, err) 2830 2831 if test.deliverMsg { 2832 appMsg := natsNexMsg(t, sub1, time.Second) 2833 require_Equal[string](t, appMsg.Subject, "B.info.22.11.bar") 2834 appMsg = natsNexMsg(t, sub2, time.Second) 2835 require_Equal[string](t, appMsg.Subject, "C.info.11.22.bar") 2836 } 2837 // Check that no (more) messages are received. 
2838 for _, sub := range []*nats.Subscription{sub1, sub2} { 2839 if msg, err := sub.NextMsg(100 * time.Millisecond); msg != nil || err != nats.ErrTimeout { 2840 t.Fatalf("Did not expect application message, got msg=%v err=%v", msg, err) 2841 } 2842 } 2843 2844 check := func() { 2845 traceMsg := natsNexMsg(t, traceSub, time.Second) 2846 var e MsgTraceEvent 2847 json.Unmarshal(traceMsg.Data, &e) 2848 2849 ingress := e.Ingress() 2850 require_True(t, ingress != nil) 2851 switch ingress.Kind { 2852 case CLIENT: 2853 require_Equal[string](t, e.Server.Name, sfornc.Name()) 2854 require_Equal[string](t, ingress.Name, "Tracer") 2855 require_Equal[string](t, ingress.Account, "A") 2856 require_Equal[string](t, ingress.Subject, "info.11.22.bar") 2857 require_True(t, e.SubjectMapping() == nil) 2858 require_True(t, e.ServiceImports() == nil) 2859 require_True(t, e.StreamExports() == nil) 2860 egress := e.Egresses() 2861 require_Equal[int](t, len(egress), 2) 2862 for _, eg := range egress { 2863 switch eg.Kind { 2864 case ROUTER: 2865 require_Equal[string](t, eg.Name, sfornc2.Name()) 2866 require_Equal[string](t, eg.Account, _EMPTY_) 2867 case GATEWAY: 2868 require_Equal[string](t, eg.Name, sfornc3.Name()) 2869 require_Equal[string](t, eg.Account, _EMPTY_) 2870 default: 2871 t.Fatalf("Unexpected egress: %+v", eg) 2872 } 2873 } 2874 case ROUTER: 2875 require_Equal[string](t, e.Server.Name, sfornc2.Name()) 2876 require_Equal[string](t, ingress.Name, sfornc.Name()) 2877 require_Equal[string](t, ingress.Account, "A") 2878 require_Equal[string](t, ingress.Subject, "info.11.22.bar") 2879 require_True(t, e.SubjectMapping() == nil) 2880 require_True(t, e.ServiceImports() == nil) 2881 stexps := e.StreamExports() 2882 require_True(t, stexps != nil) 2883 require_Equal[int](t, len(stexps), 1) 2884 se := stexps[0] 2885 require_Equal[string](t, se.Account, "B") 2886 require_Equal[string](t, se.To, "B.info.22.11.bar") 2887 egress := e.Egresses() 2888 if mainTest.allow { 2889 require_Equal[int](t, len(egress), 1) 2890 eg := egress[0] 2891 require_True(t, eg.Kind == CLIENT) 2892 require_Equal[string](t, eg.Name, "sub1") 2893 require_Equal[string](t, eg.Subscription, "info.*.*.>") 2894 require_Equal[string](t, eg.Queue, _EMPTY_) 2895 } else { 2896 require_Equal[int](t, len(egress), 0) 2897 } 2898 case GATEWAY: 2899 require_Equal[string](t, e.Server.Name, sfornc3.Name()) 2900 require_Equal[string](t, ingress.Name, sfornc.Name()) 2901 require_Equal[string](t, ingress.Account, "A") 2902 require_Equal[string](t, ingress.Subject, "info.11.22.bar") 2903 require_True(t, e.SubjectMapping() == nil) 2904 require_True(t, e.ServiceImports() == nil) 2905 stexps := e.StreamExports() 2906 require_True(t, stexps != nil) 2907 require_Equal[int](t, len(stexps), 1) 2908 se := stexps[0] 2909 require_Equal[string](t, se.Account, "C") 2910 require_Equal[string](t, se.To, "C.info.11.22.bar") 2911 egress := e.Egresses() 2912 if mainTest.allow { 2913 require_Equal[int](t, len(egress), 1) 2914 eg := egress[0] 2915 require_True(t, eg.Kind == CLIENT) 2916 require_Equal[string](t, eg.Name, "sub2") 2917 require_Equal[string](t, eg.Subscription, "info.*.*.>") 2918 require_Equal[string](t, eg.Queue, "my_queue") 2919 } else { 2920 require_Equal[int](t, len(egress), 0) 2921 } 2922 default: 2923 t.Fatalf("Unexpected ingress: %+v", ingress) 2924 } 2925 } 2926 // We expect 3 events 2927 check() 2928 check() 2929 check() 2930 // Make sure we are not receiving more traces 2931 if tm, err := traceSub.NextMsg(250 * time.Millisecond); err == nil { 2932 
t.Fatalf("Should not have received trace message: %s", tm.Data) 2933 } 2934 }) 2935 } 2936 }) 2937 } 2938 } 2939 2940 func TestMsgTraceStreamExportWithLeafNode_Hub(t *testing.T) { 2941 confHub := createConfFile(t, []byte(` 2942 listen: 127.0.0.1:-1 2943 server_name: "S1" 2944 accounts { 2945 A { 2946 users: [{user: a, password: pwd}] 2947 exports: [ 2948 { stream: "info.*.*.>"} 2949 ] 2950 } 2951 B { 2952 users: [{user: b, password: pwd}] 2953 imports: [ { stream: {account: "A", subject:"info.*.*.>"}, to: "B.info.$2.$1.>", allow_trace: true } ] 2954 } 2955 C { 2956 users: [{user: c, password: pwd}] 2957 imports: [ { stream: {account: "A", subject:"info.*.*.>"}, to: "C.info.$1.$2.>", allow_trace: true } ] 2958 } 2959 } 2960 leafnodes { 2961 port: -1 2962 } 2963 `)) 2964 hub, ohub := RunServerWithConfig(confHub) 2965 defer hub.Shutdown() 2966 2967 confLeaf := createConfFile(t, []byte(fmt.Sprintf(` 2968 listen: 127.0.0.1:-1 2969 server_name: "S2" 2970 accounts { 2971 LEAF { users: [{user: leaf, password: pwd}] } 2972 } 2973 leafnodes { 2974 remotes [ 2975 { url: "nats://a:pwd@127.0.0.1:%d", account: "LEAF" } 2976 ] 2977 } 2978 `, ohub.LeafNode.Port))) 2979 leaf, _ := RunServerWithConfig(confLeaf) 2980 defer leaf.Shutdown() 2981 2982 checkLeafNodeConnectedCount(t, hub, 1) 2983 checkLeafNodeConnectedCount(t, leaf, 1) 2984 2985 nc := natsConnect(t, leaf.ClientURL(), nats.UserInfo("leaf", "pwd"), nats.Name("Tracer")) 2986 defer nc.Close() 2987 traceSub := natsSubSync(t, nc, "my.trace.subj") 2988 2989 checkSubInterest(t, hub, "A", traceSub.Subject, time.Second) 2990 2991 nc2 := natsConnect(t, hub.ClientURL(), nats.UserInfo("b", "pwd"), nats.Name("sub1")) 2992 defer nc2.Close() 2993 sub1 := natsSubSync(t, nc2, "B.info.*.*.>") 2994 natsFlush(t, nc2) 2995 2996 nc3 := natsConnect(t, hub.ClientURL(), nats.UserInfo("c", "pwd"), nats.Name("sub2")) 2997 defer nc3.Close() 2998 sub2 := natsQueueSubSync(t, nc3, "C.info.>", "my_queue") 2999 natsFlush(t, nc3) 3000 3001 acc, err := leaf.LookupAccount("LEAF") 3002 require_NoError(t, err) 3003 checkFor(t, time.Second, 50*time.Millisecond, func() error { 3004 acc.mu.RLock() 3005 sl := acc.sl 3006 acc.mu.RUnlock() 3007 r := sl.Match("info.1.2.3") 3008 ok := len(r.psubs) > 0 3009 if ok && (len(r.qsubs) == 0 || len(r.qsubs[0]) == 0) { 3010 ok = false 3011 } 3012 if !ok { 3013 return fmt.Errorf("Subscription interest not yet propagated") 3014 } 3015 return nil 3016 }) 3017 3018 for _, test := range []struct { 3019 name string 3020 deliverMsg bool 3021 }{ 3022 3023 {"just trace", false}, 3024 {"deliver msg", true}, 3025 } { 3026 3027 t.Run(test.name, func(t *testing.T) { 3028 msg := nats.NewMsg("info.11.22.bar") 3029 msg.Header.Set(MsgTraceDest, traceSub.Subject) 3030 if !test.deliverMsg { 3031 msg.Header.Set(MsgTraceOnly, "true") 3032 } 3033 msg.Data = []byte("hello") 3034 3035 err := nc.PublishMsg(msg) 3036 require_NoError(t, err) 3037 3038 if test.deliverMsg { 3039 appMsg := natsNexMsg(t, sub1, time.Second) 3040 require_Equal[string](t, appMsg.Subject, "B.info.22.11.bar") 3041 appMsg = natsNexMsg(t, sub2, time.Second) 3042 require_Equal[string](t, appMsg.Subject, "C.info.11.22.bar") 3043 } 3044 // Check that no (more) messages are received. 
3045 for _, sub := range []*nats.Subscription{sub1, sub2} { 3046 if msg, err := sub.NextMsg(100 * time.Millisecond); msg != nil || err != nats.ErrTimeout { 3047 t.Fatalf("Did not expect application message, got msg=%v err=%v", msg, err) 3048 } 3049 } 3050 check := func() { 3051 traceMsg := natsNexMsg(t, traceSub, time.Second) 3052 var e MsgTraceEvent 3053 json.Unmarshal(traceMsg.Data, &e) 3054 3055 ingress := e.Ingress() 3056 require_True(t, ingress != nil) 3057 3058 switch ingress.Kind { 3059 case CLIENT: 3060 require_Equal[string](t, e.Server.Name, leaf.Name()) 3061 require_Equal[string](t, ingress.Name, "Tracer") 3062 require_Equal[string](t, ingress.Account, "LEAF") 3063 require_Equal[string](t, ingress.Subject, "info.11.22.bar") 3064 require_True(t, e.SubjectMapping() == nil) 3065 require_True(t, e.ServiceImports() == nil) 3066 require_True(t, e.StreamExports() == nil) 3067 egress := e.Egresses() 3068 require_Equal[int](t, len(egress), 1) 3069 eg := egress[0] 3070 require_True(t, eg.Kind == LEAF) 3071 require_Equal[string](t, eg.Name, hub.Name()) 3072 require_Equal[string](t, eg.Account, _EMPTY_) 3073 require_Equal[string](t, eg.Subscription, _EMPTY_) 3074 require_Equal[string](t, eg.Queue, _EMPTY_) 3075 case LEAF: 3076 require_Equal[string](t, e.Server.Name, hub.Name()) 3077 require_Equal[string](t, ingress.Name, leaf.Name()) 3078 require_Equal[string](t, ingress.Account, "A") 3079 require_Equal[string](t, ingress.Subject, "info.11.22.bar") 3080 require_True(t, e.SubjectMapping() == nil) 3081 require_True(t, e.ServiceImports() == nil) 3082 stexps := e.StreamExports() 3083 require_True(t, stexps != nil) 3084 require_Equal[int](t, len(stexps), 2) 3085 for _, se := range stexps { 3086 switch se.Account { 3087 case "B": 3088 require_Equal[string](t, se.To, "B.info.22.11.bar") 3089 case "C": 3090 require_Equal[string](t, se.To, "C.info.11.22.bar") 3091 default: 3092 t.Fatalf("Unexpected stream export: %+v", se) 3093 } 3094 } 3095 egress := e.Egresses() 3096 require_Equal[int](t, len(egress), 2) 3097 for _, eg := range egress { 3098 require_True(t, eg.Kind == CLIENT) 3099 switch eg.Account { 3100 case "B": 3101 require_Equal[string](t, eg.Name, "sub1") 3102 require_Equal[string](t, eg.Subscription, "info.*.*.>") 3103 require_Equal[string](t, eg.Queue, _EMPTY_) 3104 case "C": 3105 require_Equal[string](t, eg.Name, "sub2") 3106 require_Equal[string](t, eg.Subscription, "info.*.*.>") 3107 require_Equal[string](t, eg.Queue, "my_queue") 3108 default: 3109 t.Fatalf("Unexpected egress: %+v", eg) 3110 } 3111 } 3112 default: 3113 t.Fatalf("Unexpected ingress: %+v", ingress) 3114 } 3115 } 3116 // We expect 2 events 3117 check() 3118 check() 3119 // Make sure we are not receiving more traces 3120 if tm, err := traceSub.NextMsg(250 * time.Millisecond); err == nil { 3121 t.Fatalf("Should not have received trace message: %s", tm.Data) 3122 } 3123 }) 3124 } 3125 } 3126 3127 func TestMsgTraceStreamExportWithLeafNode_Leaf(t *testing.T) { 3128 confHub := createConfFile(t, []byte(` 3129 listen: 127.0.0.1:-1 3130 server_name: "S1" 3131 accounts { 3132 HUB { users: [{user: hub, password: pwd}] } 3133 } 3134 leafnodes { 3135 port: -1 3136 } 3137 `)) 3138 hub, ohub := RunServerWithConfig(confHub) 3139 defer hub.Shutdown() 3140 3141 confLeaf := createConfFile(t, []byte(fmt.Sprintf(` 3142 listen: 127.0.0.1:-1 3143 server_name: "S2" 3144 accounts { 3145 A { 3146 users: [{user: a, password: pwd}] 3147 exports: [ 3148 { stream: "info.*.*.>"} 3149 ] 3150 } 3151 B { 3152 users: [{user: b, password: pwd}] 3153 imports: 
[ { stream: {account: "A", subject:"info.*.*.>"}, to: "B.info.$2.$1.>", allow_trace: true } ] 3154 } 3155 C { 3156 users: [{user: c, password: pwd}] 3157 imports: [ { stream: {account: "A", subject:"info.*.*.>"}, to: "C.info.$1.$2.>", allow_trace: true } ] 3158 } 3159 } 3160 leafnodes { 3161 remotes [ 3162 { url: "nats://hub:pwd@127.0.0.1:%d", account: "A" } 3163 ] 3164 } 3165 `, ohub.LeafNode.Port))) 3166 leaf, _ := RunServerWithConfig(confLeaf) 3167 defer leaf.Shutdown() 3168 3169 checkLeafNodeConnectedCount(t, hub, 1) 3170 checkLeafNodeConnectedCount(t, leaf, 1) 3171 3172 nc := natsConnect(t, hub.ClientURL(), nats.UserInfo("hub", "pwd"), nats.Name("Tracer")) 3173 defer nc.Close() 3174 traceSub := natsSubSync(t, nc, "my.trace.subj") 3175 3176 checkSubInterest(t, leaf, "A", traceSub.Subject, time.Second) 3177 3178 nc2 := natsConnect(t, leaf.ClientURL(), nats.UserInfo("b", "pwd"), nats.Name("sub1")) 3179 defer nc2.Close() 3180 sub1 := natsSubSync(t, nc2, "B.info.*.*.>") 3181 natsFlush(t, nc2) 3182 3183 nc3 := natsConnect(t, leaf.ClientURL(), nats.UserInfo("c", "pwd"), nats.Name("sub2")) 3184 defer nc3.Close() 3185 sub2 := natsQueueSubSync(t, nc3, "C.info.>", "my_queue") 3186 natsFlush(t, nc3) 3187 3188 acc, err := hub.LookupAccount("HUB") 3189 require_NoError(t, err) 3190 checkFor(t, time.Second, 50*time.Millisecond, func() error { 3191 acc.mu.RLock() 3192 sl := acc.sl 3193 acc.mu.RUnlock() 3194 r := sl.Match("info.1.2.3") 3195 ok := len(r.psubs) > 0 3196 if ok && (len(r.qsubs) == 0 || len(r.qsubs[0]) == 0) { 3197 ok = false 3198 } 3199 if !ok { 3200 return fmt.Errorf("Subscription interest not yet propagated") 3201 } 3202 return nil 3203 }) 3204 3205 for _, test := range []struct { 3206 name string 3207 deliverMsg bool 3208 }{ 3209 3210 {"just trace", false}, 3211 {"deliver msg", true}, 3212 } { 3213 3214 t.Run(test.name, func(t *testing.T) { 3215 msg := nats.NewMsg("info.11.22.bar") 3216 msg.Header.Set(MsgTraceDest, traceSub.Subject) 3217 if !test.deliverMsg { 3218 msg.Header.Set(MsgTraceOnly, "true") 3219 } 3220 msg.Data = []byte("hello") 3221 3222 err := nc.PublishMsg(msg) 3223 require_NoError(t, err) 3224 3225 if test.deliverMsg { 3226 appMsg := natsNexMsg(t, sub1, time.Second) 3227 require_Equal[string](t, appMsg.Subject, "B.info.22.11.bar") 3228 appMsg = natsNexMsg(t, sub2, time.Second) 3229 require_Equal[string](t, appMsg.Subject, "C.info.11.22.bar") 3230 } 3231 // Check that no (more) messages are received. 
3232 for _, sub := range []*nats.Subscription{sub1, sub2} { 3233 if msg, err := sub.NextMsg(100 * time.Millisecond); msg != nil || err != nats.ErrTimeout { 3234 t.Fatalf("Did not expect application message, got msg=%v err=%v", msg, err) 3235 } 3236 } 3237 check := func() { 3238 traceMsg := natsNexMsg(t, traceSub, time.Second) 3239 var e MsgTraceEvent 3240 json.Unmarshal(traceMsg.Data, &e) 3241 3242 ingress := e.Ingress() 3243 require_True(t, ingress != nil) 3244 3245 switch ingress.Kind { 3246 case CLIENT: 3247 require_Equal[string](t, e.Server.Name, hub.Name()) 3248 require_Equal[string](t, ingress.Name, "Tracer") 3249 require_Equal[string](t, ingress.Account, "HUB") 3250 require_Equal[string](t, ingress.Subject, "info.11.22.bar") 3251 require_True(t, e.SubjectMapping() == nil) 3252 require_True(t, e.ServiceImports() == nil) 3253 require_True(t, e.StreamExports() == nil) 3254 egress := e.Egresses() 3255 require_Equal[int](t, len(egress), 1) 3256 eg := egress[0] 3257 require_True(t, eg.Kind == LEAF) 3258 require_Equal[string](t, eg.Name, leaf.Name()) 3259 require_Equal[string](t, eg.Account, _EMPTY_) 3260 require_Equal[string](t, eg.Subscription, _EMPTY_) 3261 require_Equal[string](t, eg.Queue, _EMPTY_) 3262 case LEAF: 3263 require_Equal[string](t, e.Server.Name, leaf.Name()) 3264 require_Equal[string](t, ingress.Name, hub.Name()) 3265 require_Equal[string](t, ingress.Account, "A") 3266 require_Equal[string](t, ingress.Subject, "info.11.22.bar") 3267 require_True(t, e.SubjectMapping() == nil) 3268 require_True(t, e.ServiceImports() == nil) 3269 stexps := e.StreamExports() 3270 require_True(t, stexps != nil) 3271 require_Equal[int](t, len(stexps), 2) 3272 for _, se := range stexps { 3273 switch se.Account { 3274 case "B": 3275 require_Equal[string](t, se.To, "B.info.22.11.bar") 3276 case "C": 3277 require_Equal[string](t, se.To, "C.info.11.22.bar") 3278 default: 3279 t.Fatalf("Unexpected stream export: %+v", se) 3280 } 3281 } 3282 egress := e.Egresses() 3283 require_Equal[int](t, len(egress), 2) 3284 for _, eg := range egress { 3285 require_True(t, eg.Kind == CLIENT) 3286 switch eg.Account { 3287 case "B": 3288 require_Equal[string](t, eg.Name, "sub1") 3289 require_Equal[string](t, eg.Subscription, "info.*.*.>") 3290 require_Equal[string](t, eg.Queue, _EMPTY_) 3291 case "C": 3292 require_Equal[string](t, eg.Name, "sub2") 3293 require_Equal[string](t, eg.Subscription, "info.*.*.>") 3294 require_Equal[string](t, eg.Queue, "my_queue") 3295 default: 3296 t.Fatalf("Unexpected egress: %+v", eg) 3297 } 3298 } 3299 default: 3300 t.Fatalf("Unexpected ingress: %+v", ingress) 3301 } 3302 } 3303 // We expect 2 events 3304 check() 3305 check() 3306 // Make sure we are not receiving more traces 3307 if tm, err := traceSub.NextMsg(250 * time.Millisecond); err == nil { 3308 t.Fatalf("Should not have received trace message: %s", tm.Data) 3309 } 3310 }) 3311 } 3312 } 3313 3314 func TestMsgTraceJetStream(t *testing.T) { 3315 opts := DefaultTestOptions 3316 opts.Port = -1 3317 opts.JetStream = true 3318 opts.JetStreamMaxMemory = 270 3319 opts.StoreDir = t.TempDir() 3320 s := RunServer(&opts) 3321 defer s.Shutdown() 3322 3323 nc, js := jsClientConnect(t, s) 3324 defer nc.Close() 3325 3326 cfg := &nats.StreamConfig{ 3327 Name: "TEST", 3328 Storage: nats.MemoryStorage, 3329 Subjects: []string{"foo"}, 3330 Replicas: 1, 3331 AllowRollup: true, 3332 SubjectTransform: &nats.SubjectTransformConfig{ 3333 Source: "foo", 3334 Destination: "bar", 3335 }, 3336 } 3337 _, err := js.AddStream(cfg) 3338 require_NoError(t, err) 
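// The stream uses a subject transform (foo -> bar), which is why the
// JetStream section of the trace events below reports the stored
// subject as "bar". AllowRollup and the small JetStreamMaxMemory value
// set above exist so that later sub-tests can exercise the rollup and
// insufficient-resources error paths.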
3339 3340 nct := natsConnect(t, s.ClientURL(), nats.Name("Tracer")) 3341 defer nct.Close() 3342 3343 traceSub := natsSubSync(t, nct, "my.trace.subj") 3344 natsFlush(t, nct) 3345 3346 msg := nats.NewMsg("foo") 3347 msg.Header.Set(JSMsgId, "MyId") 3348 msg.Data = make([]byte, 50) 3349 _, err = js.PublishMsg(msg) 3350 require_NoError(t, err) 3351 3352 checkStream := func(t *testing.T, expected int) { 3353 t.Helper() 3354 checkFor(t, time.Second, 15*time.Millisecond, func() error { 3355 si, err := js.StreamInfo("TEST") 3356 if err != nil { 3357 return err 3358 } 3359 if n := si.State.Msgs; int(n) != expected { 3360 return fmt.Errorf("Expected %d messages, got %v", expected, n) 3361 } 3362 return nil 3363 }) 3364 } 3365 checkStream(t, 1) 3366 3367 for _, test := range []struct { 3368 name string 3369 deliverMsg bool 3370 }{ 3371 {"just trace", false}, 3372 {"deliver msg", true}, 3373 } { 3374 t.Run(test.name, func(t *testing.T) { 3375 msg := nats.NewMsg("foo") 3376 msg.Header.Set(MsgTraceDest, traceSub.Subject) 3377 if !test.deliverMsg { 3378 msg.Header.Set(MsgTraceOnly, "true") 3379 } 3380 msg.Data = make([]byte, 50) 3381 err = nct.PublishMsg(msg) 3382 require_NoError(t, err) 3383 3384 // Wait a bit and check if message should be in the stream or not. 3385 time.Sleep(50 * time.Millisecond) 3386 if test.deliverMsg { 3387 checkStream(t, 2) 3388 } else { 3389 checkStream(t, 1) 3390 } 3391 3392 traceMsg := natsNexMsg(t, traceSub, time.Second) 3393 var e MsgTraceEvent 3394 json.Unmarshal(traceMsg.Data, &e) 3395 require_Equal[string](t, e.Server.Name, s.Name()) 3396 ingress := e.Ingress() 3397 require_True(t, ingress != nil) 3398 require_True(t, ingress.Kind == CLIENT) 3399 require_Equal[string](t, ingress.Name, "Tracer") 3400 require_Equal[int](t, len(e.Egresses()), 0) 3401 js := e.JetStream() 3402 require_True(t, js != nil) 3403 require_True(t, js.Timestamp != time.Time{}) 3404 require_Equal[string](t, js.Stream, "TEST") 3405 require_Equal[string](t, js.Subject, "bar") 3406 require_False(t, js.NoInterest) 3407 require_Equal[string](t, js.Error, _EMPTY_) 3408 }) 3409 } 3410 3411 jst, err := nct.JetStream() 3412 require_NoError(t, err) 3413 3414 mset, err := s.globalAccount().lookupStream("TEST") 3415 require_NoError(t, err) 3416 3417 // Now we will not ask for delivery and use headers that will fail checks 3418 // and make sure that message is not added, that the stream's clfs is not 3419 // increased, and that the JS trace shows the error. 
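// The error returned by jst.PublishMsg() is ignored in these cases;
// what we assert on is that the failure reason is reported in the Error
// field of the trace event's JetStream section.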
3420 newMsg := func() *nats.Msg { 3421 msg = nats.NewMsg("foo") 3422 msg.Header.Set(MsgTraceDest, traceSub.Subject) 3423 msg.Header.Set(MsgTraceOnly, "true") 3424 msg.Data = []byte("hello") 3425 return msg 3426 } 3427 3428 msgCount := 2 3429 for _, test := range []struct { 3430 name string 3431 headerName string 3432 headerVal string 3433 expectedErr string 3434 special int 3435 }{ 3436 {"unexpected stream name", JSExpectedStream, "WRONG", "expected stream does not match", 0}, 3437 {"duplicate id", JSMsgId, "MyId", "duplicate", 0}, 3438 {"last seq by subject mismatch", JSExpectedLastSubjSeq, "10", "last sequence by subject mismatch", 0}, 3439 {"last seq mismatch", JSExpectedLastSeq, "10", "last sequence mismatch", 0}, 3440 {"last msgid mismatch", JSExpectedLastMsgId, "MyId3", "last msgid mismatch", 0}, 3441 {"invalid rollup command", JSMsgRollup, "wrong", "rollup value invalid: \"wrong\"", 0}, 3442 {"rollup not permitted", JSMsgRollup, JSMsgRollupSubject, "rollup not permitted", 1}, 3443 {"max msg size", _EMPTY_, _EMPTY_, ErrMaxPayload.Error(), 2}, 3444 {"normal message ok", _EMPTY_, _EMPTY_, _EMPTY_, 3}, 3445 {"insufficient resources", _EMPTY_, _EMPTY_, NewJSInsufficientResourcesError().Error(), 0}, 3446 {"stream sealed", _EMPTY_, _EMPTY_, NewJSStreamSealedError().Error(), 4}, 3447 } { 3448 t.Run(test.name, func(t *testing.T) { 3449 msg = newMsg() 3450 if test.headerName != _EMPTY_ { 3451 msg.Header.Set(test.headerName, test.headerVal) 3452 } 3453 switch test.special { 3454 case 1: 3455 // Update stream to prevent rollups, and set a max size. 3456 cfg.AllowRollup = false 3457 cfg.MaxMsgSize = 100 3458 _, err = js.UpdateStream(cfg) 3459 require_NoError(t, err) 3460 case 2: 3461 msg.Data = make([]byte, 200) 3462 case 3: 3463 pa, err := jst.Publish("foo", make([]byte, 100)) 3464 require_NoError(t, err) 3465 msgCount++ 3466 checkStream(t, msgCount) 3467 require_Equal[uint64](t, pa.Sequence, 3) 3468 return 3469 case 4: 3470 cfg.Sealed = true 3471 _, err = js.UpdateStream(cfg) 3472 require_NoError(t, err) 3473 default: 3474 } 3475 jst.PublishMsg(msg) 3476 3477 // Message count should not have increased and stay at 2. 
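// (more precisely, it should stay at msgCount, which starts at 2 and
// is only bumped by the "normal message ok" case above).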
3478 checkStream(t, msgCount)
3479 // Check that clfs does not increase
3480 mset.mu.RLock()
3481 clfs := mset.getCLFS()
3482 mset.mu.RUnlock()
3483 if clfs != 0 {
3484 t.Fatalf("Stream's clfs was expected to be 0, is %d", clfs)
3485 }
3486 traceMsg := natsNexMsg(t, traceSub, time.Second)
3487 var e MsgTraceEvent
3488 json.Unmarshal(traceMsg.Data, &e)
3489 require_Equal[string](t, e.Server.Name, s.Name())
3490 ingress := e.Ingress()
3491 require_True(t, ingress != nil)
3492 require_True(t, ingress.Kind == CLIENT)
3493 require_Equal[string](t, ingress.Name, "Tracer")
3494 require_Equal[int](t, len(e.Egresses()), 0)
3495 js := e.JetStream()
3496 require_True(t, js != nil)
3497 require_Equal[string](t, js.Stream, "TEST")
3498 require_Equal[string](t, js.Subject, _EMPTY_)
3499 require_False(t, js.NoInterest)
3500 if et := js.Error; !strings.Contains(et, test.expectedErr) {
3501 t.Fatalf("Expected JS error to contain %q, got %q", test.expectedErr, et)
3502 }
3503 })
3504 }
3505
3506 // Create a stream with interest retention policy
3507 _, err = js.AddStream(&nats.StreamConfig{
3508 Name: "NO_INTEREST",
3509 Subjects: []string{"baz"},
3510 Retention: nats.InterestPolicy,
3511 })
3512 require_NoError(t, err)
3513 msg = nats.NewMsg("baz")
3514 msg.Header.Set(MsgTraceDest, traceSub.Subject)
3515 msg.Header.Set(MsgTraceOnly, "true")
3516 msg.Data = []byte("hello")
3517 err = nct.PublishMsg(msg)
3518 require_NoError(t, err)
3519
3520 traceMsg := natsNexMsg(t, traceSub, time.Second)
3521 var e MsgTraceEvent
3522 json.Unmarshal(traceMsg.Data, &e)
3523 require_Equal[string](t, e.Server.Name, s.Name())
3524 ingress := e.Ingress()
3525 require_True(t, ingress != nil)
3526 require_True(t, ingress.Kind == CLIENT)
3527 require_Equal[string](t, ingress.Name, "Tracer")
3528 require_Equal[int](t, len(e.Egresses()), 0)
3529 ejs := e.JetStream()
3530 require_True(t, ejs != nil)
3531 require_Equal[string](t, ejs.Stream, "NO_INTEREST")
3532 require_Equal[string](t, ejs.Subject, "baz")
3533 require_True(t, ejs.NoInterest)
3534 require_Equal[string](t, ejs.Error, _EMPTY_)
3535 }
3536
3537 func TestMsgTraceJetStreamWithSuperCluster(t *testing.T) {
3538 sc := createJetStreamSuperCluster(t, 3, 2)
3539 defer sc.shutdown()
3540
3541 traceDest := "my.trace.subj"
3542
3543 // Hack to set the trace destination for the global account in order
3544 // to make sure that the traceParentHdr header is disabled when a message
3545 // is stored in JetStream, which will prevent emitting a trace
3546 // when such a message is retrieved and traverses a route.
3547 // Without the account destination set, the trace would not be
3548 // triggered, but that alone would not prove that the header was
3549 // properly disabled.
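// For illustration only (not part of this test): an account-level
// destination can also be set declaratively in the server
// configuration, as the earlier service-import tests do, e.g.:
//
//	accounts {
//	    A { users: [{user: a, password: pwd}], trace_dest: "a.trace.subj" }
//	}
//
// Here it is set programmatically on the global account instead.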
3550 for _, cl := range sc.clusters { 3551 for _, s := range cl.servers { 3552 acc, err := s.LookupAccount(globalAccountName) 3553 require_NoError(t, err) 3554 acc.setTraceDest(traceDest) 3555 } 3556 } 3557 3558 c1 := sc.clusters[0] 3559 c2 := sc.clusters[1] 3560 nc, js := jsClientConnect(t, c1.randomServer()) 3561 defer nc.Close() 3562 3563 checkStream := func(t *testing.T, stream string, expected int) { 3564 t.Helper() 3565 checkFor(t, time.Second, 15*time.Millisecond, func() error { 3566 si, err := js.StreamInfo(stream) 3567 if err != nil { 3568 return err 3569 } 3570 if n := si.State.Msgs; int(n) != expected { 3571 return fmt.Errorf("Expected %d messages for stream %q, got %v", expected, stream, n) 3572 } 3573 return nil 3574 }) 3575 } 3576 3577 for mainIter, mainTest := range []struct { 3578 name string 3579 stream string 3580 }{ 3581 {"from stream leader", "TEST1"}, 3582 {"from non stream leader", "TEST2"}, 3583 {"from other cluster", "TEST3"}, 3584 } { 3585 t.Run(mainTest.name, func(t *testing.T) { 3586 cfg := &nats.StreamConfig{ 3587 Name: mainTest.stream, 3588 Replicas: 3, 3589 AllowRollup: true, 3590 } 3591 _, err := js.AddStream(cfg) 3592 require_NoError(t, err) 3593 sc.waitOnStreamLeader(globalAccountName, mainTest.stream) 3594 3595 // The streams are created from c1 cluster. 3596 slSrv := c1.streamLeader(globalAccountName, mainTest.stream) 3597 3598 // Store some messages 3599 payload := make([]byte, 50) 3600 for i := 0; i < 5; i++ { 3601 _, err = js.Publish(mainTest.stream, payload) 3602 require_NoError(t, err) 3603 } 3604 3605 // We will connect the app that sends the trace message to a server 3606 // that is either the stream leader, a random server in c1, or 3607 // a server in c2 (to go through a GW). 3608 var s *Server 3609 switch mainIter { 3610 case 0: 3611 s = slSrv 3612 case 1: 3613 s = c1.randomNonStreamLeader(globalAccountName, mainTest.stream) 3614 case 2: 3615 s = c2.randomServer() 3616 } 3617 3618 nct := natsConnect(t, s.ClientURL(), nats.Name("Tracer")) 3619 defer nct.Close() 3620 3621 traceSub := natsSubSync(t, nct, traceDest) 3622 natsFlush(t, nct) 3623 3624 for _, test := range []struct { 3625 name string 3626 deliverMsg bool 3627 }{ 3628 {"just trace", false}, 3629 {"deliver msg", true}, 3630 } { 3631 t.Run(test.name, func(t *testing.T) { 3632 msg := nats.NewMsg(mainTest.stream) 3633 msg.Header.Set(MsgTraceDest, traceSub.Subject) 3634 if !test.deliverMsg { 3635 msg.Header.Set(MsgTraceOnly, "true") 3636 } 3637 // We add the traceParentHdr header to make sure that it 3638 // is deactivated in addition to the Nats-Trace-Dest 3639 // header too when needed. 
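// (The traceparent value used below follows the W3C Trace Context format
// "version-traceid-parentid-traceflags"; the trailing "-01" is the
// trace-flags field with the "sampled" bit set. Sampling only matters when
// the external header is the sole trigger, as exercised in
// TestMsgTraceTriggeredByExternalHeader further down.)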
3640 msg.Header.Set(traceParentHdr, "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01") 3641 msg.Header.Set(JSMsgId, "MyId") 3642 msg.Data = payload 3643 err = nct.PublishMsg(msg) 3644 require_NoError(t, err) 3645 3646 if test.deliverMsg { 3647 checkStream(t, mainTest.stream, 6) 3648 } else { 3649 checkStream(t, mainTest.stream, 5) 3650 } 3651 3652 check := func() bool { 3653 traceMsg := natsNexMsg(t, traceSub, time.Second) 3654 var e MsgTraceEvent 3655 json.Unmarshal(traceMsg.Data, &e) 3656 3657 checkJS := func() { 3658 t.Helper() 3659 js := e.JetStream() 3660 require_True(t, js != nil) 3661 require_Equal[string](t, js.Stream, mainTest.stream) 3662 require_Equal[string](t, js.Subject, mainTest.stream) 3663 require_False(t, js.NoInterest) 3664 require_Equal[string](t, js.Error, _EMPTY_) 3665 } 3666 3667 ingress := e.Ingress() 3668 require_True(t, ingress != nil) 3669 switch mainIter { 3670 case 0: 3671 require_Equal[string](t, e.Server.Name, s.Name()) 3672 require_True(t, ingress.Kind == CLIENT) 3673 require_Equal[string](t, ingress.Name, "Tracer") 3674 require_Equal[int](t, len(e.Egresses()), 0) 3675 checkJS() 3676 case 1: 3677 switch ingress.Kind { 3678 case CLIENT: 3679 require_Equal[string](t, e.Server.Name, s.Name()) 3680 require_Equal[string](t, ingress.Name, "Tracer") 3681 egress := e.Egresses() 3682 require_Equal[int](t, len(egress), 1) 3683 ci := egress[0] 3684 require_True(t, ci.Kind == ROUTER) 3685 require_Equal[string](t, ci.Name, slSrv.Name()) 3686 case ROUTER: 3687 require_Equal[string](t, e.Server.Name, slSrv.Name()) 3688 require_Equal[int](t, len(e.Egresses()), 0) 3689 checkJS() 3690 default: 3691 t.Fatalf("Unexpected ingress: %+v", ingress) 3692 } 3693 case 2: 3694 switch ingress.Kind { 3695 case CLIENT: 3696 require_Equal[string](t, e.Server.Name, s.Name()) 3697 require_Equal[string](t, ingress.Name, "Tracer") 3698 egress := e.Egresses() 3699 require_Equal[int](t, len(egress), 1) 3700 ci := egress[0] 3701 require_True(t, ci.Kind == GATEWAY) 3702 // It could have gone to any server in the C1 cluster. 3703 // If it is not the stream leader, it should be 3704 // routed to it. 3705 case GATEWAY: 3706 require_Equal[string](t, ingress.Name, s.Name()) 3707 // If the server that emitted this event is the 3708 // stream leader, then we should have the stream, 3709 // otherwise, it should be routed. 
3710 if e.Server.Name == slSrv.Name() { 3711 require_Equal[int](t, len(e.Egresses()), 0) 3712 checkJS() 3713 } else { 3714 egress := e.Egresses() 3715 require_Equal[int](t, len(egress), 1) 3716 ci := egress[0] 3717 require_True(t, ci.Kind == ROUTER) 3718 require_Equal[string](t, ci.Name, slSrv.Name()) 3719 return true 3720 } 3721 case ROUTER: 3722 require_Equal[string](t, e.Server.Name, slSrv.Name()) 3723 require_Equal[int](t, len(e.Egresses()), 0) 3724 checkJS() 3725 default: 3726 t.Fatalf("Unexpected ingress: %+v", ingress) 3727 } 3728 } 3729 return false 3730 } 3731 check() 3732 if mainIter > 0 { 3733 if check() { 3734 check() 3735 } 3736 } 3737 // Make sure we are not receiving more traces 3738 if tm, err := traceSub.NextMsg(250 * time.Millisecond); err == nil { 3739 t.Fatalf("Should not have received trace message: %s", tm.Data) 3740 } 3741 }) 3742 } 3743 3744 jst, err := nct.JetStream() 3745 require_NoError(t, err) 3746 3747 newMsg := func() *nats.Msg { 3748 msg := nats.NewMsg(mainTest.stream) 3749 msg.Header.Set(MsgTraceDest, traceSub.Subject) 3750 msg.Header.Set(MsgTraceOnly, "true") 3751 msg.Header.Set(traceParentHdr, "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01") 3752 msg.Data = []byte("hello") 3753 return msg 3754 } 3755 3756 msgCount := 6 3757 for _, subtest := range []struct { 3758 name string 3759 headerName string 3760 headerVal string 3761 expectedErr string 3762 special int 3763 }{ 3764 {"unexpected stream name", JSExpectedStream, "WRONG", "expected stream does not match", 0}, 3765 {"duplicate id", JSMsgId, "MyId", "duplicate", 0}, 3766 {"last seq by subject mismatch", JSExpectedLastSubjSeq, "3", "last sequence by subject mismatch", 0}, 3767 {"last seq mismatch", JSExpectedLastSeq, "10", "last sequence mismatch", 0}, 3768 {"last msgid mismatch", JSExpectedLastMsgId, "MyId3", "last msgid mismatch", 0}, 3769 {"invalid rollup command", JSMsgRollup, "wrong", "rollup value invalid: \"wrong\"", 0}, 3770 {"rollup not permitted", JSMsgRollup, JSMsgRollupSubject, "rollup not permitted", 1}, 3771 {"max msg size", _EMPTY_, _EMPTY_, ErrMaxPayload.Error(), 2}, 3772 {"new message ok", _EMPTY_, _EMPTY_, _EMPTY_, 3}, 3773 {"stream sealed", _EMPTY_, _EMPTY_, NewJSStreamSealedError().Error(), 4}, 3774 } { 3775 t.Run(subtest.name, func(t *testing.T) { 3776 msg := newMsg() 3777 if subtest.headerName != _EMPTY_ { 3778 msg.Header.Set(subtest.headerName, subtest.headerVal) 3779 } 3780 switch subtest.special { 3781 case 1: 3782 // Update stream to prevent rollups, and set a max size. 
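// (These cfg updates persist for the remaining subtests: the reduced
// MaxMsgSize is what the following "max msg size" case relies on, and the
// stream is later sealed on top of the same config.)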
3783 cfg.AllowRollup = false 3784 cfg.MaxMsgSize = 100 3785 _, err = js.UpdateStream(cfg) 3786 require_NoError(t, err) 3787 case 2: 3788 msg.Data = make([]byte, 200) 3789 case 3: 3790 pa, err := jst.Publish(mainTest.stream, []byte("hello")) 3791 require_NoError(t, err) 3792 msgCount++ 3793 checkStream(t, mainTest.stream, msgCount) 3794 require_Equal[uint64](t, pa.Sequence, 7) 3795 return 3796 case 4: 3797 cfg.Sealed = true 3798 _, err = js.UpdateStream(cfg) 3799 require_NoError(t, err) 3800 default: 3801 } 3802 jst.PublishMsg(msg) 3803 checkStream(t, mainTest.stream, msgCount) 3804 checkJSTrace := func() bool { 3805 traceMsg := natsNexMsg(t, traceSub, time.Second) 3806 var e MsgTraceEvent 3807 json.Unmarshal(traceMsg.Data, &e) 3808 3809 checkJS := func() { 3810 t.Helper() 3811 js := e.JetStream() 3812 require_True(t, e.JetStream() != nil) 3813 require_Equal[string](t, js.Stream, mainTest.stream) 3814 require_Equal[string](t, js.Subject, _EMPTY_) 3815 require_False(t, js.NoInterest) 3816 if et := js.Error; !strings.Contains(et, subtest.expectedErr) { 3817 t.Fatalf("Expected JS error to contain %q, got %q", subtest.expectedErr, et) 3818 } 3819 } 3820 3821 ingress := e.Ingress() 3822 require_True(t, ingress != nil) 3823 // We will focus only on the trace message that 3824 // includes the JetStream event. 3825 switch mainIter { 3826 case 0: 3827 require_Equal[string](t, e.Server.Name, s.Name()) 3828 require_True(t, ingress.Kind == CLIENT) 3829 require_Equal[string](t, ingress.Name, "Tracer") 3830 require_Equal[int](t, len(e.Egresses()), 0) 3831 checkJS() 3832 case 1: 3833 if ingress.Kind == ROUTER { 3834 require_Equal[string](t, e.Server.Name, slSrv.Name()) 3835 require_Equal[int](t, len(e.Egresses()), 0) 3836 require_True(t, e.JetStream() != nil) 3837 checkJS() 3838 } 3839 case 2: 3840 switch ingress.Kind { 3841 case GATEWAY: 3842 require_Equal[string](t, ingress.Name, s.Name()) 3843 // If the server that emitted this event is the 3844 // stream leader, then we should have the stream, 3845 // otherwise, it should be routed. 3846 if e.Server.Name == slSrv.Name() { 3847 require_Equal[int](t, len(e.Egresses()), 0) 3848 checkJS() 3849 } else { 3850 return true 3851 } 3852 case ROUTER: 3853 require_Equal[string](t, e.Server.Name, slSrv.Name()) 3854 require_Equal[int](t, len(e.Egresses()), 0) 3855 checkJS() 3856 } 3857 } 3858 return false 3859 } 3860 checkJSTrace() 3861 if mainIter > 0 { 3862 if checkJSTrace() { 3863 checkJSTrace() 3864 } 3865 } 3866 }) 3867 } 3868 }) 3869 } 3870 3871 // Now cause a step-down, and verify count is as expected. 3872 for _, stream := range []string{"TEST1", "TEST2", "TEST3"} { 3873 _, err := nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, stream), nil, time.Second) 3874 require_NoError(t, err) 3875 sc.waitOnStreamLeader(globalAccountName, stream) 3876 checkStream(t, stream, 7) 3877 } 3878 3879 s := c1.randomNonStreamLeader(globalAccountName, "TEST1") 3880 // Try to get a message that will come from a route and make sure that 3881 // this does not trigger a trace message, that is, that headers have 3882 // been properly removed so that they don't trigger it. 
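// (Some of the stored messages were published with Nats-Trace-Dest and a
// sampled traceparent header, and every server in the super cluster has the
// account trace destination set, so if those headers had not been disabled
// when the messages were stored, re-delivering them over a route would emit
// spurious trace events.)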
3883 nct := natsConnect(t, s.ClientURL(), nats.Name("Tracer")) 3884 defer nct.Close() 3885 traceSub := natsSubSync(t, nct, traceDest) 3886 natsFlush(t, nct) 3887 3888 jct, err := nct.JetStream() 3889 require_NoError(t, err) 3890 3891 sub, err := jct.SubscribeSync("TEST1") 3892 require_NoError(t, err) 3893 for i := 0; i < 7; i++ { 3894 jmsg, err := sub.NextMsg(time.Second) 3895 require_NoError(t, err) 3896 require_Equal[string](t, jmsg.Header.Get(MsgTraceDest), _EMPTY_) 3897 } 3898 3899 msg, err := traceSub.NextMsg(250 * time.Millisecond) 3900 if err != nats.ErrTimeout { 3901 if msg != nil { 3902 t.Fatalf("Expected timeout, got msg headers=%+v data=%s", msg.Header, msg.Data) 3903 } 3904 t.Fatalf("Expected timeout, got err=%v", err) 3905 } 3906 } 3907 3908 func TestMsgTraceWithCompression(t *testing.T) { 3909 o := DefaultOptions() 3910 s := RunServer(o) 3911 defer s.Shutdown() 3912 3913 nc := natsConnect(t, s.ClientURL()) 3914 defer nc.Close() 3915 3916 traceSub := natsSubSync(t, nc, "my.trace.subj") 3917 natsFlush(t, nc) 3918 3919 for _, test := range []struct { 3920 compressAlgo string 3921 expectedHdr string 3922 unsupported bool 3923 }{ 3924 {"gzip", "gzip", false}, 3925 {"snappy", "snappy", false}, 3926 {"s2", "snappy", false}, 3927 {"bad one", "identity", true}, 3928 } { 3929 t.Run(test.compressAlgo, func(t *testing.T) { 3930 msg := nats.NewMsg("foo") 3931 msg.Header.Set(MsgTraceDest, traceSub.Subject) 3932 msg.Header.Set(acceptEncodingHeader, test.compressAlgo) 3933 msg.Data = []byte("hello!") 3934 err := nc.PublishMsg(msg) 3935 require_NoError(t, err) 3936 3937 traceMsg := natsNexMsg(t, traceSub, time.Second) 3938 data := traceMsg.Data 3939 eh := traceMsg.Header.Get(contentEncodingHeader) 3940 require_Equal[string](t, eh, test.expectedHdr) 3941 if test.unsupported { 3942 // We should be able to unmarshal directly 3943 } else { 3944 switch test.expectedHdr { 3945 case "gzip": 3946 zr, err := gzip.NewReader(bytes.NewReader(data)) 3947 require_NoError(t, err) 3948 data, err = io.ReadAll(zr) 3949 if err != nil && err != io.ErrUnexpectedEOF { 3950 t.Fatalf("Unexpected error: %v", err) 3951 } 3952 err = zr.Close() 3953 require_NoError(t, err) 3954 case "snappy": 3955 sr := s2.NewReader(bytes.NewReader(data)) 3956 data, err = io.ReadAll(sr) 3957 if err != nil && err != io.ErrUnexpectedEOF { 3958 t.Fatalf("Unexpected error: %v", err) 3959 } 3960 } 3961 } 3962 var e MsgTraceEvent 3963 err = json.Unmarshal(data, &e) 3964 require_NoError(t, err) 3965 ingress := e.Ingress() 3966 require_True(t, ingress != nil) 3967 require_Equal[string](t, e.Server.Name, s.Name()) 3968 require_Equal[string](t, ingress.Subject, "foo") 3969 }) 3970 } 3971 } 3972 3973 func TestMsgTraceHops(t *testing.T) { 3974 // We will have a test with the following topology 3975 // 3976 // =================== =================== 3977 // = C1 cluster = = C2 cluster = 3978 // =================== <--- Gateway ---> =================== 3979 // = C1-S1 <-> C1-S2 = = C2-S1 = 3980 // =================== =================== 3981 // ^ ^ ^ 3982 // | Leafnode | | Leafnode 3983 // | | | 3984 // =================== =================== 3985 // = C3 cluster = = C4 cluster = 3986 // =================== =================== 3987 // = C3-S1 <-> C3-S2 = = C4-S1 = 3988 // =================== =================== 3989 // ^ 3990 // | Leafnode 3991 // |-------| 3992 // =================== 3993 // = C5 cluster = 3994 // =================== 3995 // = C5-S1 <-> C5-S2 = 3996 // =================== 3997 // 3998 // And a subscription on "foo" attached to all servers,
and the subscription 3999 // on the trace subject attached to c1-s1 (and where the trace message will 4000 // be sent from). 4001 // 4002 commonTmpl := ` 4003 port: -1 4004 server_name: "%s-%s" 4005 accounts { 4006 A { users: [{user:"a", pass: "pwd"}] } 4007 $SYS { users: [{user:"admin", pass: "s3cr3t!"}] } 4008 } 4009 system_account: "$SYS" 4010 cluster { 4011 port: -1 4012 name: "%s" 4013 %s 4014 } 4015 ` 4016 genCommon := func(cname, sname string, routePort int) string { 4017 var routes string 4018 if routePort > 0 { 4019 routes = fmt.Sprintf(`routes: ["nats://127.0.0.1:%d"]`, routePort) 4020 } 4021 return fmt.Sprintf(commonTmpl, cname, sname, cname, routes) 4022 } 4023 c1s1conf := createConfFile(t, []byte(fmt.Sprintf(` 4024 %s 4025 gateway { 4026 port: -1 4027 name: "C1" 4028 } 4029 leafnodes { 4030 port: -1 4031 } 4032 `, genCommon("C1", "S1", 0)))) 4033 c1s1, oc1s1 := RunServerWithConfig(c1s1conf) 4034 defer c1s1.Shutdown() 4035 4036 c1s2conf := createConfFile(t, []byte(fmt.Sprintf(` 4037 %s 4038 gateway { 4039 port: -1 4040 name: "C1" 4041 } 4042 leafnodes { 4043 port: -1 4044 } 4045 `, genCommon("C1", "S2", oc1s1.Cluster.Port)))) 4046 c1s2, oc1s2 := RunServerWithConfig(c1s2conf) 4047 defer c1s2.Shutdown() 4048 4049 checkClusterFormed(t, c1s1, c1s2) 4050 4051 c2s1conf := createConfFile(t, []byte(fmt.Sprintf(` 4052 %s 4053 gateway { 4054 port: -1 4055 name: "C2" 4056 gateways [ 4057 { 4058 name: "C1" 4059 url: "nats://a:pwd@127.0.0.1:%d" 4060 } 4061 ] 4062 } 4063 leafnodes { 4064 port: -1 4065 } 4066 `, genCommon("C2", "S1", 0), oc1s1.Gateway.Port))) 4067 c2s1, oc2s1 := RunServerWithConfig(c2s1conf) 4068 defer c2s1.Shutdown() 4069 4070 c4s1conf := createConfFile(t, []byte(fmt.Sprintf(` 4071 %s 4072 leafnodes { 4073 remotes [{url: "nats://a:pwd@127.0.0.1:%d", account: "A"}] 4074 } 4075 `, genCommon("C4", "S1", 0), oc2s1.LeafNode.Port))) 4076 c4s1, _ := RunServerWithConfig(c4s1conf) 4077 defer c4s1.Shutdown() 4078 4079 for _, s := range []*Server{c1s1, c1s2, c2s1} { 4080 waitForOutboundGateways(t, s, 1, time.Second) 4081 } 4082 waitForInboundGateways(t, c2s1, 2, time.Second) 4083 4084 c3s1conf := createConfFile(t, []byte(fmt.Sprintf(` 4085 %s 4086 leafnodes { 4087 port: -1 4088 remotes [{url: "nats://a:pwd@127.0.0.1:%d", account: "A"}] 4089 } 4090 `, genCommon("C3", "S1", 0), oc1s1.LeafNode.Port))) 4091 c3s1, oc3s1 := RunServerWithConfig(c3s1conf) 4092 defer c3s1.Shutdown() 4093 4094 c3s2conf := createConfFile(t, []byte(fmt.Sprintf(` 4095 %s 4096 leafnodes { 4097 port: -1 4098 remotes [{url: "nats://a:pwd@127.0.0.1:%d", account: "A"}] 4099 } 4100 system_account: "$SYS" 4101 `, genCommon("C3", "S2", oc3s1.Cluster.Port), oc1s2.LeafNode.Port))) 4102 c3s2, oc3s2 := RunServerWithConfig(c3s2conf) 4103 defer c3s2.Shutdown() 4104 4105 checkClusterFormed(t, c3s1, c3s2) 4106 checkLeafNodeConnected(t, c1s1) 4107 checkLeafNodeConnected(t, c1s2) 4108 checkLeafNodeConnected(t, c3s1) 4109 checkLeafNodeConnected(t, c3s2) 4110 4111 c5s1conf := createConfFile(t, []byte(fmt.Sprintf(` 4112 %s 4113 leafnodes { 4114 remotes [{url: "nats://a:pwd@127.0.0.1:%d", account: "A"}] 4115 } 4116 `, genCommon("C5", "S1", 0), oc3s2.LeafNode.Port))) 4117 c5s1, oc5s1 := RunServerWithConfig(c5s1conf) 4118 defer c5s1.Shutdown() 4119 4120 c5s2conf := createConfFile(t, []byte(fmt.Sprintf(` 4121 %s 4122 leafnodes { 4123 remotes [{url: "nats://a:pwd@127.0.0.1:%d", account: "A"}] 4124 } 4125 `, genCommon("C5", "S2", oc5s1.Cluster.Port), oc3s2.LeafNode.Port))) 4126 c5s2, _ := RunServerWithConfig(c5s2conf) 4127 defer 
c5s2.Shutdown() 4128 4129 checkLeafNodeConnected(t, c5s1) 4130 checkLeafNodeConnected(t, c5s2) 4131 checkLeafNodeConnectedCount(t, c3s2, 3) 4132 4133 nct := natsConnect(t, c1s1.ClientURL(), nats.UserInfo("a", "pwd"), nats.Name("Tracer")) 4134 defer nct.Close() 4135 traceSub := natsSubSync(t, nct, "my.trace.subj") 4136 natsFlush(t, nct) 4137 4138 allServers := []*Server{c1s1, c1s2, c2s1, c3s1, c3s2, c4s1, c5s1, c5s2} 4139 // Check that the subscription interest on the trace subject reaches all servers. 4140 for _, s := range allServers { 4141 if s == c2s1 { 4142 // Gateway needs to be checked differently. 4143 checkGWInterestOnlyModeInterestOn(t, c2s1, "C1", "A", traceSub.Subject) 4144 continue 4145 } 4146 checkSubInterest(t, s, "A", traceSub.Subject, time.Second) 4147 } 4148 4149 var subs []*nats.Subscription 4150 // Now create a subscription on "foo" on all servers (do in reverse order). 4151 for i := len(allServers) - 1; i >= 0; i-- { 4152 s := allServers[i] 4153 nc := natsConnect(t, s.ClientURL(), nats.UserInfo("a", "pwd"), nats.Name(fmt.Sprintf("sub%d", i+1))) 4154 defer nc.Close() 4155 subs = append(subs, natsSubSync(t, nc, "foo")) 4156 natsFlush(t, nc) 4157 } 4158 4159 // Check sub interest on "foo" on all servers. 4160 for _, s := range allServers { 4161 checkSubInterest(t, s, "A", "foo", time.Second) 4162 } 4163 4164 // Now send a trace message from c1s1 4165 msg := nats.NewMsg("foo") 4166 msg.Header.Set(MsgTraceDest, traceSub.Subject) 4167 msg.Data = []byte("hello!") 4168 err := nct.PublishMsg(msg) 4169 require_NoError(t, err) 4170 4171 // Check that all subscriptions received the message 4172 for i, sub := range subs { 4173 appMsg, err := sub.NextMsg(time.Second) 4174 if err != nil { 4175 t.Fatalf("Error getting app message for server %q", allServers[i]) 4176 } 4177 require_Equal[string](t, string(appMsg.Data), "hello!") 4178 // Check that no (more) messages are received. 
4179 if msg, err := sub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 4180 t.Fatalf("Did not expect application message, got %s", msg.Data) 4181 } 4182 } 4183 4184 events := make(map[string]*MsgTraceEvent, 8) 4185 // We expect 8 events 4186 for i := 0; i < 8; i++ { 4187 traceMsg := natsNexMsg(t, traceSub, time.Second) 4188 var e MsgTraceEvent 4189 json.Unmarshal(traceMsg.Data, &e) 4190 4191 var hop string 4192 if hopVals := e.Request.Header[MsgTraceHop]; len(hopVals) > 0 { 4193 hop = hopVals[0] 4194 } 4195 events[hop] = &e 4196 } 4197 // Make sure we are not receiving more traces 4198 if tm, err := traceSub.NextMsg(250 * time.Millisecond); err == nil { 4199 t.Fatalf("Should not have received trace message: %s", tm.Data) 4200 } 4201 4202 checkIngress := func(e *MsgTraceEvent, kind int, name, hop string) *MsgTraceIngress { 4203 t.Helper() 4204 ingress := e.Ingress() 4205 require_True(t, ingress != nil) 4206 require_True(t, ingress.Kind == kind) 4207 require_Equal[string](t, ingress.Account, "A") 4208 require_Equal[string](t, ingress.Subject, "foo") 4209 require_Equal[string](t, ingress.Name, name) 4210 var hhop string 4211 if hopVals := e.Request.Header[MsgTraceHop]; len(hopVals) > 0 { 4212 hhop = hopVals[0] 4213 } 4214 require_Equal[string](t, hhop, hop) 4215 return ingress 4216 } 4217 4218 checkEgressClient := func(eg *MsgTraceEgress, name string) { 4219 t.Helper() 4220 require_True(t, eg.Kind == CLIENT) 4221 require_Equal[string](t, eg.Name, name) 4222 require_Equal[string](t, eg.Hop, _EMPTY_) 4223 require_Equal[string](t, eg.Subscription, "foo") 4224 require_Equal[string](t, eg.Queue, _EMPTY_) 4225 } 4226 4227 // First, we should have an event without a "hop" header, that is the 4228 // ingress from the client. 4229 e, ok := events[_EMPTY_] 4230 require_True(t, ok) 4231 checkIngress(e, CLIENT, "Tracer", _EMPTY_) 4232 require_Equal[int](t, e.Hops, 3) 4233 egress := e.Egresses() 4234 require_Equal[int](t, len(egress), 4) 4235 var ( 4236 leafC3S1Hop string 4237 leafC3S2Hop string 4238 leafC4S1Hop string 4239 leafC5S1Hop string 4240 leafC5S2Hop string 4241 routeC1S2Hop string 4242 gwC2S1Hop string 4243 ) 4244 for _, eg := range egress { 4245 switch eg.Kind { 4246 case CLIENT: 4247 checkEgressClient(eg, "sub1") 4248 case ROUTER: 4249 require_Equal[string](t, eg.Name, c1s2.Name()) 4250 routeC1S2Hop = eg.Hop 4251 case LEAF: 4252 require_Equal[string](t, eg.Name, c3s1.Name()) 4253 leafC3S1Hop = eg.Hop 4254 case GATEWAY: 4255 require_Equal[string](t, eg.Name, c2s1.Name()) 4256 gwC2S1Hop = eg.Hop 4257 default: 4258 t.Fatalf("Unexpected egress: %+v", eg) 4259 } 4260 } 4261 // All "hop" ids should be not empty and different from each other 4262 require_True(t, leafC3S1Hop != _EMPTY_ && routeC1S2Hop != _EMPTY_ && gwC2S1Hop != _EMPTY_) 4263 require_True(t, leafC3S1Hop != routeC1S2Hop && leafC3S1Hop != gwC2S1Hop && routeC1S2Hop != gwC2S1Hop) 4264 4265 // Now check the routed server in C1 (c1s2) 4266 e, ok = events[routeC1S2Hop] 4267 require_True(t, ok) 4268 checkIngress(e, ROUTER, c1s1.Name(), routeC1S2Hop) 4269 require_Equal[int](t, e.Hops, 1) 4270 egress = e.Egresses() 4271 require_Equal[int](t, len(egress), 2) 4272 for _, eg := range egress { 4273 switch eg.Kind { 4274 case CLIENT: 4275 checkEgressClient(eg, "sub2") 4276 case LEAF: 4277 require_Equal[string](t, eg.Name, c3s2.Name()) 4278 require_Equal[string](t, eg.Hop, routeC1S2Hop+".1") 4279 leafC3S2Hop = eg.Hop 4280 default: 4281 t.Fatalf("Unexpected egress: %+v", eg) 4282 } 4283 } 4284 require_True(t, leafC3S2Hop != _EMPTY_) 4285 
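// Note on hop ids: each egress reported by the origin server carries its
// own non-empty hop id, downstream servers echo the id they received in the
// Nats-Trace-Hop header of their trace request, and they extend it with a
// ".1", ".2", ... suffix for each egress that goes to another server
// (client egresses carry no hop; for example, the C3-S2 leaf egress above
// is reported with hop routeC1S2Hop+".1").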
4286 // Let's check the gateway server 4287 e, ok = events[gwC2S1Hop] 4288 require_True(t, ok) 4289 checkIngress(e, GATEWAY, c1s1.Name(), gwC2S1Hop) 4290 require_Equal[int](t, e.Hops, 1) 4291 egress = e.Egresses() 4292 require_Equal[int](t, len(egress), 2) 4293 for _, eg := range egress { 4294 switch eg.Kind { 4295 case CLIENT: 4296 checkEgressClient(eg, "sub3") 4297 case LEAF: 4298 require_Equal[string](t, eg.Name, c4s1.Name()) 4299 require_Equal[string](t, eg.Hop, gwC2S1Hop+".1") 4300 leafC4S1Hop = eg.Hop 4301 default: 4302 t.Fatalf("Unexpected egress: %+v", eg) 4303 } 4304 } 4305 require_True(t, leafC4S1Hop != _EMPTY_) 4306 4307 // Let's check the C3 cluster, starting at C3-S1 4308 e, ok = events[leafC3S1Hop] 4309 require_True(t, ok) 4310 checkIngress(e, LEAF, c1s1.Name(), leafC3S1Hop) 4311 require_Equal[int](t, e.Hops, 0) 4312 egress = e.Egresses() 4313 require_Equal[int](t, len(egress), 1) 4314 checkEgressClient(egress[0], "sub4") 4315 4316 // Now C3-S2 4317 e, ok = events[leafC3S2Hop] 4318 require_True(t, ok) 4319 checkIngress(e, LEAF, c1s2.Name(), leafC3S2Hop) 4320 require_Equal[int](t, e.Hops, 2) 4321 egress = e.Egresses() 4322 require_Equal[int](t, len(egress), 3) 4323 for _, eg := range egress { 4324 switch eg.Kind { 4325 case CLIENT: 4326 checkEgressClient(eg, "sub5") 4327 case LEAF: 4328 require_True(t, eg.Name == c5s1.Name() || eg.Name == c5s2.Name()) 4329 require_True(t, eg.Hop == leafC3S2Hop+".1" || eg.Hop == leafC3S2Hop+".2") 4330 if eg.Name == c5s1.Name() { 4331 leafC5S1Hop = eg.Hop 4332 } else { 4333 leafC5S2Hop = eg.Hop 4334 } 4335 default: 4336 t.Fatalf("Unexpected egress: %+v", eg) 4337 } 4338 } 4339 // The leafC5SxHop must be different and not empty 4340 require_True(t, leafC5S1Hop != _EMPTY_ && leafC5S1Hop != leafC5S2Hop && leafC5S2Hop != _EMPTY_) 4341 4342 // Check the C4 cluster 4343 e, ok = events[leafC4S1Hop] 4344 require_True(t, ok) 4345 checkIngress(e, LEAF, c2s1.Name(), leafC4S1Hop) 4346 require_Equal[int](t, e.Hops, 0) 4347 egress = e.Egresses() 4348 require_Equal[int](t, len(egress), 1) 4349 checkEgressClient(egress[0], "sub6") 4350 4351 // Finally, the C5 cluster, starting with C5-S1 4352 e, ok = events[leafC5S1Hop] 4353 require_True(t, ok) 4354 checkIngress(e, LEAF, c3s2.Name(), leafC5S1Hop) 4355 require_Equal[int](t, e.Hops, 0) 4356 egress = e.Egresses() 4357 require_Equal[int](t, len(egress), 1) 4358 checkEgressClient(egress[0], "sub7") 4359 4360 // Then C5-S2 4361 e, ok = events[leafC5S2Hop] 4362 require_True(t, ok) 4363 checkIngress(e, LEAF, c3s2.Name(), leafC5S2Hop) 4364 require_Equal[int](t, e.Hops, 0) 4365 egress = e.Egresses() 4366 require_Equal[int](t, len(egress), 1) 4367 checkEgressClient(egress[0], "sub8") 4368 } 4369 4370 func TestMsgTraceTriggeredByExternalHeader(t *testing.T) { 4371 tmpl := ` 4372 port: -1 4373 server_name: "%s" 4374 accounts { 4375 A { 4376 users: [{user:A, password: pwd}] 4377 trace_dest: "acc.trace.dest" 4378 } 4379 B { 4380 users: [{user:B, password: pwd}] 4381 %s 4382 } 4383 } 4384 cluster { 4385 name: "local" 4386 port: -1 4387 %s 4388 } 4389 ` 4390 conf1 := createConfFile(t, []byte(fmt.Sprintf(tmpl, "A", _EMPTY_, _EMPTY_))) 4391 s1, o1 := RunServerWithConfig(conf1) 4392 defer s1.Shutdown() 4393 4394 conf2 := createConfFile(t, []byte(fmt.Sprintf(tmpl, "B", _EMPTY_, fmt.Sprintf(`routes: ["nats://127.0.0.1:%d"]`, o1.Cluster.Port)))) 4395 s2, _ := RunServerWithConfig(conf2) 4396 defer s2.Shutdown() 4397 4398 checkClusterFormed(t, s1, s2) 4399 4400 nc2 := natsConnect(t, s2.ClientURL(), nats.UserInfo("A", "pwd")) 4401 defer 
nc2.Close() 4402 appSub := natsSubSync(t, nc2, "foo") 4403 natsFlush(t, nc2) 4404 4405 checkSubInterest(t, s1, "A", "foo", time.Second) 4406 4407 nc1 := natsConnect(t, s1.ClientURL(), nats.UserInfo("A", "pwd")) 4408 defer nc1.Close() 4409 4410 traceSub := natsSubSync(t, nc1, "trace.dest") 4411 accTraceSub := natsSubSync(t, nc1, "acc.trace.dest") 4412 natsFlush(t, nc1) 4413 4414 checkSubInterest(t, s1, "A", traceSub.Subject, time.Second) 4415 checkSubInterest(t, s1, "A", accTraceSub.Subject, time.Second) 4416 4417 var msgCount int 4418 for _, test := range []struct { 4419 name string 4420 setHeaders func(h nats.Header) 4421 traceTriggered bool 4422 traceOnly bool 4423 expectedAccSub bool 4424 }{ 4425 // Tests with external header only (no Nats-Trace-Dest). In this case, the 4426 // trace is triggered based on sampling (last token is `-01`). The presence 4427 // of Nats-Trace-Only has no effect and message should always be delivered 4428 // to the application. 4429 {"only external header sampling", 4430 func(h nats.Header) { 4431 h.Set(traceParentHdr, "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01") 4432 }, 4433 true, 4434 false, 4435 true}, 4436 {"only external header with different case and sampling", 4437 func(h nats.Header) { 4438 h.Set("TraceParent", "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01") 4439 }, 4440 true, 4441 false, 4442 true}, 4443 {"only external header sampling but not simply 01", 4444 func(h nats.Header) { 4445 h.Set(traceParentHdr, "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-25") 4446 }, 4447 true, 4448 false, 4449 true}, 4450 {"only external header no sampling", 4451 func(h nats.Header) { 4452 h.Set(traceParentHdr, "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00") 4453 }, 4454 false, 4455 false, 4456 false}, 4457 {"external header sampling and trace only", 4458 func(h nats.Header) { 4459 h.Set(traceParentHdr, "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01") 4460 h.Set(MsgTraceOnly, "true") 4461 }, 4462 true, 4463 false, 4464 true}, 4465 {"external header no sampling and trace only", 4466 func(h nats.Header) { 4467 h.Set(traceParentHdr, "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00") 4468 h.Set(MsgTraceOnly, "true") 4469 }, 4470 false, 4471 false, 4472 false}, 4473 // Tests where Nats-Trace-Dest is present, so ignore external header and 4474 // always deliver to the Nats-Trace-Dest, not the account. 
4475 {"trace dest and external header sampling", 4476 func(h nats.Header) { 4477 h.Set(MsgTraceDest, traceSub.Subject) 4478 h.Set(traceParentHdr, "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01") 4479 }, 4480 true, 4481 false, 4482 false}, 4483 {"trace dest and external header no sampling", 4484 func(h nats.Header) { 4485 h.Set(MsgTraceDest, traceSub.Subject) 4486 h.Set(traceParentHdr, "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00") 4487 }, 4488 true, 4489 false, 4490 false}, 4491 {"trace dest with trace only and external header sampling", 4492 func(h nats.Header) { 4493 h.Set(MsgTraceDest, traceSub.Subject) 4494 h.Set(MsgTraceOnly, "true") 4495 h.Set(traceParentHdr, "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01") 4496 }, 4497 true, 4498 true, 4499 false}, 4500 {"trace dest with trace only and external header no sampling", 4501 func(h nats.Header) { 4502 h.Set(MsgTraceDest, traceSub.Subject) 4503 h.Set(MsgTraceOnly, "true") 4504 h.Set(traceParentHdr, "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00") 4505 }, 4506 true, 4507 true, 4508 false}, 4509 } { 4510 t.Run(test.name, func(t *testing.T) { 4511 msg := nats.NewMsg("foo") 4512 test.setHeaders(msg.Header) 4513 msgCount++ 4514 msgPayload := fmt.Sprintf("msg%d", msgCount) 4515 msg.Data = []byte(msgPayload) 4516 err := nc1.PublishMsg(msg) 4517 require_NoError(t, err) 4518 4519 if !test.traceOnly { 4520 appMsg := natsNexMsg(t, appSub, time.Second) 4521 require_Equal[string](t, string(appMsg.Data), msgPayload) 4522 } 4523 // Make sure we don't receive more (or not if trace only) 4524 if appMsg, err := appSub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 4525 t.Fatalf("Expected no app message, got %q", appMsg.Data) 4526 } 4527 4528 checkTrace := func(sub *nats.Subscription) { 4529 // We should receive 2 traces, 1 per server. 4530 for i := 0; i < 2; i++ { 4531 tm := natsNexMsg(t, sub, time.Second) 4532 var e MsgTraceEvent 4533 err := json.Unmarshal(tm.Data, &e) 4534 require_NoError(t, err) 4535 } 4536 } 4537 4538 if test.traceTriggered { 4539 if test.expectedAccSub { 4540 checkTrace(accTraceSub) 4541 } else { 4542 checkTrace(traceSub) 4543 } 4544 } 4545 // Make sure no trace is received in the other trace sub 4546 // or no trace received at all. 
4547 for _, sub := range []*nats.Subscription{accTraceSub, traceSub} { 4548 if tm, err := sub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 4549 t.Fatalf("Expected no trace for the trace sub on %q, got %q", sub.Subject, tm.Data) 4550 } 4551 } 4552 }) 4553 } 4554 4555 nc1.Close() 4556 nc2.Close() 4557 4558 // Now replace connections and subs for account "B" 4559 nc2 = natsConnect(t, s2.ClientURL(), nats.UserInfo("B", "pwd")) 4560 defer nc2.Close() 4561 appSub = natsSubSync(t, nc2, "foo") 4562 natsFlush(t, nc2) 4563 4564 checkSubInterest(t, s1, "B", "foo", time.Second) 4565 4566 nc1 = natsConnect(t, s1.ClientURL(), nats.UserInfo("B", "pwd")) 4567 defer nc1.Close() 4568 4569 traceSub = natsSubSync(t, nc1, "trace.dest") 4570 accTraceSub = natsSubSync(t, nc1, "acc.trace.dest") 4571 natsFlush(t, nc1) 4572 4573 checkSubInterest(t, s1, "B", traceSub.Subject, time.Second) 4574 checkSubInterest(t, s1, "B", accTraceSub.Subject, time.Second) 4575 4576 for _, test := range []struct { 4577 name string 4578 reload bool 4579 }{ 4580 {"external header but no account destination", true}, 4581 {"external header with account destination added through config reload", false}, 4582 } { 4583 t.Run(test.name, func(t *testing.T) { 4584 msg := nats.NewMsg("foo") 4585 msg.Header.Set(traceParentHdr, "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01") 4586 msg.Data = []byte("hello") 4587 err := nc1.PublishMsg(msg) 4588 require_NoError(t, err) 4589 4590 // Application should receive the message 4591 appMsg := natsNexMsg(t, appSub, time.Second) 4592 require_Equal[string](t, string(appMsg.Data), "hello") 4593 // Only once... 4594 if appMsg, err := appSub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 4595 t.Fatalf("Expected no app message, got %q", appMsg.Data) 4596 } 4597 if !test.reload { 4598 // We should receive the traces (1 per server) on the account destination 4599 for i := 0; i < 2; i++ { 4600 tm := natsNexMsg(t, accTraceSub, time.Second) 4601 var e MsgTraceEvent 4602 err := json.Unmarshal(tm.Data, &e) 4603 require_NoError(t, err) 4604 } 4605 } 4606 // No (or no more) trace message should be received. 4607 for _, sub := range []*nats.Subscription{accTraceSub, traceSub} { 4608 if tm, err := sub.NextMsg(100 * time.Millisecond); err != nats.ErrTimeout { 4609 t.Fatalf("Expected no trace for the trace sub on %q, got %q", sub.Subject, tm.Data) 4610 } 4611 } 4612 // Do the config reload and we will repeat the test and now 4613 // we should receive the trace message into the account 4614 // destination trace. 
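// For reference, after the reload below account B's definition effectively
// becomes:
//
//	B {
//	    users: [{user:B, password: pwd}]
//	    trace_dest: "acc.trace.dest"
//	}
//
// which is why the second subtest sees the externally triggered traces on
// the account destination.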
4615 if test.reload { 4616 reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl, "A", `trace_dest: "acc.trace.dest"`, _EMPTY_)) 4617 reloadUpdateConfig(t, s2, conf2, fmt.Sprintf(tmpl, "B", `trace_dest: "acc.trace.dest"`, fmt.Sprintf(`routes: ["nats://127.0.0.1:%d"]`, o1.Cluster.Port))) 4618 } 4619 }) 4620 } 4621 } 4622 4623 func TestMsgTraceAccountTraceDestJWTUpdate(t *testing.T) { 4624 // create system account 4625 sysKp, _ := nkeys.CreateAccount() 4626 sysPub, _ := sysKp.PublicKey() 4627 sysCreds := newUser(t, sysKp) 4628 // create account A 4629 akp, _ := nkeys.CreateAccount() 4630 aPub, _ := akp.PublicKey() 4631 claim := jwt.NewAccountClaims(aPub) 4632 aJwt, err := claim.Encode(oKp) 4633 require_NoError(t, err) 4634 4635 dir := t.TempDir() 4636 conf := createConfFile(t, []byte(fmt.Sprintf(` 4637 listen: -1 4638 operator: %s 4639 resolver: { 4640 type: full 4641 dir: '%s' 4642 } 4643 system_account: %s 4644 `, ojwt, dir, sysPub))) 4645 s, _ := RunServerWithConfig(conf) 4646 defer s.Shutdown() 4647 updateJwt(t, s.ClientURL(), sysCreds, aJwt, 1) 4648 4649 nc := natsConnect(t, s.ClientURL(), createUserCreds(t, nil, akp)) 4650 defer nc.Close() 4651 4652 sub := natsSubSync(t, nc, "acc.trace.dest") 4653 natsFlush(t, nc) 4654 4655 for i, test := range []struct { 4656 name string 4657 traceTriggered bool 4658 }{ 4659 {"no acc dest", false}, 4660 {"adding trace dest", true}, 4661 {"removing trace dest", false}, 4662 } { 4663 t.Run(test.name, func(t *testing.T) { 4664 msg := nats.NewMsg("foo") 4665 msg.Header.Set(traceParentHdr, "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01") 4666 msg.Data = []byte("hello") 4667 err = nc.PublishMsg(msg) 4668 require_NoError(t, err) 4669 4670 if test.traceTriggered { 4671 tm := natsNexMsg(t, sub, time.Second) 4672 var e MsgTraceEvent 4673 err = json.Unmarshal(tm.Data, &e) 4674 require_NoError(t, err) 4675 // Simple check 4676 require_Equal[string](t, e.Server.Name, s.Name()) 4677 } 4678 // No (more) trace message expected. 
4679 tm, err := sub.NextMsg(250 * time.Millisecond) 4680 if err != nats.ErrTimeout { 4681 t.Fatalf("Expected no trace message, got %s", tm.Data) 4682 } 4683 if i < 2 { 4684 if i == 0 { 4685 claim.Trace = &jwt.MsgTrace{Destination: "acc.trace.dest"} 4686 } else { 4687 claim.Trace = nil 4688 } 4689 aJwt, err = claim.Encode(oKp) 4690 require_NoError(t, err) 4691 updateJwt(t, s.ClientURL(), sysCreds, aJwt, 1) 4692 } 4693 }) 4694 } 4695 } 4696 4697 func TestMsgTraceServiceJWTUpdate(t *testing.T) { 4698 // create system account 4699 sysKp, _ := nkeys.CreateAccount() 4700 sysPub, _ := sysKp.PublicKey() 4701 sysCreds := newUser(t, sysKp) 4702 // create account A 4703 akp, _ := nkeys.CreateAccount() 4704 aPub, _ := akp.PublicKey() 4705 aClaim := jwt.NewAccountClaims(aPub) 4706 serviceExport := &jwt.Export{Subject: "req", Type: jwt.Service} 4707 aClaim.Exports.Add(serviceExport) 4708 aJwt, err := aClaim.Encode(oKp) 4709 require_NoError(t, err) 4710 // create account B 4711 bkp, _ := nkeys.CreateAccount() 4712 bPub, _ := bkp.PublicKey() 4713 bClaim := jwt.NewAccountClaims(bPub) 4714 serviceImport := &jwt.Import{Account: aPub, Subject: "req", Type: jwt.Service} 4715 bClaim.Imports.Add(serviceImport) 4716 bJwt, err := bClaim.Encode(oKp) 4717 require_NoError(t, err) 4718 4719 dir := t.TempDir() 4720 conf := createConfFile(t, []byte(fmt.Sprintf(` 4721 listen: -1 4722 operator: %s 4723 resolver: { 4724 type: full 4725 dir: '%s' 4726 } 4727 system_account: %s 4728 `, ojwt, dir, sysPub))) 4729 s, _ := RunServerWithConfig(conf) 4730 defer s.Shutdown() 4731 updateJwt(t, s.ClientURL(), sysCreds, aJwt, 1) 4732 updateJwt(t, s.ClientURL(), sysCreds, bJwt, 1) 4733 4734 ncA := natsConnect(t, s.ClientURL(), createUserCreds(t, nil, akp), nats.Name("Service")) 4735 defer ncA.Close() 4736 4737 natsSub(t, ncA, "req", func(m *nats.Msg) { 4738 m.Respond([]byte("resp")) 4739 }) 4740 natsFlush(t, ncA) 4741 4742 ncB := natsConnect(t, s.ClientURL(), createUserCreds(t, nil, bkp)) 4743 defer ncB.Close() 4744 4745 sub := natsSubSync(t, ncB, "trace.dest") 4746 natsFlush(t, ncB) 4747 4748 for i, test := range []struct { 4749 name string 4750 allowTrace bool 4751 }{ 4752 {"trace not allowed", false}, 4753 {"trace allowed", true}, 4754 {"trace not allowed again", false}, 4755 } { 4756 t.Run(test.name, func(t *testing.T) { 4757 msg := nats.NewMsg("req") 4758 msg.Header.Set(MsgTraceDest, sub.Subject) 4759 msg.Data = []byte("request") 4760 reply, err := ncB.RequestMsg(msg, time.Second) 4761 require_NoError(t, err) 4762 require_Equal[string](t, string(reply.Data), "resp") 4763 4764 tm := natsNexMsg(t, sub, time.Second) 4765 var e MsgTraceEvent 4766 err = json.Unmarshal(tm.Data, &e) 4767 require_NoError(t, err) 4768 require_Equal[string](t, e.Server.Name, s.Name()) 4769 require_Equal[string](t, e.Ingress().Account, bPub) 4770 sis := e.ServiceImports() 4771 require_Equal[int](t, len(sis), 1) 4772 si := sis[0] 4773 require_Equal[string](t, si.Account, aPub) 4774 egresses := e.Egresses() 4775 if !test.allowTrace { 4776 require_Equal[int](t, len(egresses), 0) 4777 } else { 4778 require_Equal[int](t, len(egresses), 1) 4779 eg := egresses[0] 4780 require_Equal[string](t, eg.Name, "Service") 4781 require_Equal[string](t, eg.Account, aPub) 4782 require_Equal[string](t, eg.Subscription, "req") 4783 } 4784 // No (more) trace message expected. 
4785 tm, err = sub.NextMsg(250 * time.Millisecond) 4786 if err != nats.ErrTimeout { 4787 t.Fatalf("Expected no trace message, got %s", tm.Data) 4788 } 4789 if i < 2 { 4790 // Set AllowTrace to true at the first iteration, then 4791 // false at the second. 4792 aClaim.Exports[0].AllowTrace = (i == 0) 4793 aJwt, err = aClaim.Encode(oKp) 4794 require_NoError(t, err) 4795 updateJwt(t, s.ClientURL(), sysCreds, aJwt, 1) 4796 } 4797 }) 4798 } 4799 } 4800 4801 func TestMsgTraceStreamJWTUpdate(t *testing.T) { 4802 // create system account 4803 sysKp, _ := nkeys.CreateAccount() 4804 sysPub, _ := sysKp.PublicKey() 4805 sysCreds := newUser(t, sysKp) 4806 // create account A 4807 akp, _ := nkeys.CreateAccount() 4808 aPub, _ := akp.PublicKey() 4809 aClaim := jwt.NewAccountClaims(aPub) 4810 streamExport := &jwt.Export{Subject: "info", Type: jwt.Stream} 4811 aClaim.Exports.Add(streamExport) 4812 aJwt, err := aClaim.Encode(oKp) 4813 require_NoError(t, err) 4814 // create account B 4815 bkp, _ := nkeys.CreateAccount() 4816 bPub, _ := bkp.PublicKey() 4817 bClaim := jwt.NewAccountClaims(bPub) 4818 streamImport := &jwt.Import{Account: aPub, Subject: "info", To: "b", Type: jwt.Stream} 4819 bClaim.Imports.Add(streamImport) 4820 bJwt, err := bClaim.Encode(oKp) 4821 require_NoError(t, err) 4822 4823 dir := t.TempDir() 4824 conf := createConfFile(t, []byte(fmt.Sprintf(` 4825 listen: -1 4826 operator: %s 4827 resolver: { 4828 type: full 4829 dir: '%s' 4830 } 4831 system_account: %s 4832 `, ojwt, dir, sysPub))) 4833 s, _ := RunServerWithConfig(conf) 4834 defer s.Shutdown() 4835 updateJwt(t, s.ClientURL(), sysCreds, aJwt, 1) 4836 updateJwt(t, s.ClientURL(), sysCreds, bJwt, 1) 4837 4838 ncA := natsConnect(t, s.ClientURL(), createUserCreds(t, nil, akp)) 4839 defer ncA.Close() 4840 4841 traceSub := natsSubSync(t, ncA, "trace.dest") 4842 natsFlush(t, ncA) 4843 4844 ncB := natsConnect(t, s.ClientURL(), createUserCreds(t, nil, bkp), nats.Name("BInfo")) 4845 defer ncB.Close() 4846 4847 appSub := natsSubSync(t, ncB, "b.info") 4848 natsFlush(t, ncB) 4849 4850 for i, test := range []struct { 4851 name string 4852 allowTrace bool 4853 }{ 4854 {"trace not allowed", false}, 4855 {"trace allowed", true}, 4856 {"trace not allowed again", false}, 4857 } { 4858 t.Run(test.name, func(t *testing.T) { 4859 msg := nats.NewMsg("info") 4860 msg.Header.Set(MsgTraceDest, traceSub.Subject) 4861 msg.Data = []byte("some info") 4862 err = ncA.PublishMsg(msg) 4863 require_NoError(t, err) 4864 4865 appMsg := natsNexMsg(t, appSub, time.Second) 4866 require_Equal[string](t, string(appMsg.Data), "some info") 4867 4868 tm := natsNexMsg(t, traceSub, time.Second) 4869 var e MsgTraceEvent 4870 err = json.Unmarshal(tm.Data, &e) 4871 require_NoError(t, err) 4872 require_Equal[string](t, e.Server.Name, s.Name()) 4873 ses := e.StreamExports() 4874 require_Equal[int](t, len(ses), 1) 4875 se := ses[0] 4876 require_Equal[string](t, se.Account, bPub) 4877 require_Equal[string](t, se.To, "b.info") 4878 egresses := e.Egresses() 4879 if !test.allowTrace { 4880 require_Equal[int](t, len(egresses), 0) 4881 } else { 4882 require_Equal[int](t, len(egresses), 1) 4883 eg := egresses[0] 4884 require_Equal[string](t, eg.Name, "BInfo") 4885 require_Equal[string](t, eg.Account, bPub) 4886 require_Equal[string](t, eg.Subscription, "info") 4887 } 4888 // No (more) trace message expected. 
4889 tm, err = traceSub.NextMsg(250 * time.Millisecond) 4890 if err != nats.ErrTimeout { 4891 t.Fatalf("Expected no trace message, got %s", tm.Data) 4892 } 4893 if i < 2 { 4894 // Set AllowTrace to true at the first iteration, then 4895 // false at the second. 4896 bClaim.Imports[0].AllowTrace = (i == 0) 4897 bJwt, err = bClaim.Encode(oKp) 4898 require_NoError(t, err) 4899 updateJwt(t, s.ClientURL(), sysCreds, bJwt, 1) 4900 } 4901 }) 4902 } 4903 } 4904 4905 func TestMsgTraceParseAccountDestWithSampling(t *testing.T) { 4906 tmpl := ` 4907 port: -1 4908 accounts { 4909 A { 4910 users: [{user: a, password: pwd}] 4911 %s 4912 } 4913 } 4914 ` 4915 for _, test := range []struct { 4916 name string 4917 samplingStr string 4918 want int 4919 }{ 4920 {"trace sampling no dest", `msg_trace: {sampling: 50}`, 0}, 4921 {"trace dest only", `msg_trace: {dest: foo}`, 100}, 4922 {"trace dest with number only", `msg_trace: {dest: foo, sampling: 20}`, 20}, 4923 {"trace dest with percentage", `msg_trace: {dest: foo, sampling: 50%}`, 50}, 4924 } { 4925 t.Run(test.name, func(t *testing.T) { 4926 conf := createConfFile(t, []byte(fmt.Sprintf(tmpl, test.samplingStr))) 4927 o := LoadConfig(conf) 4928 _, sampling := o.Accounts[0].getTraceDestAndSampling() 4929 require_Equal[int](t, test.want, sampling) 4930 }) 4931 } 4932 } 4933 4934 func TestMsgTraceAccountDestWithSampling(t *testing.T) { 4935 tmpl := ` 4936 port: -1 4937 server_name: %s 4938 accounts { 4939 A { 4940 users: [{user: a, password:pwd}] 4941 msg_trace: {dest: "acc.dest"%s} 4942 } 4943 } 4944 cluster { 4945 port: -1 4946 %s 4947 } 4948 ` 4949 conf1 := createConfFile(t, []byte(fmt.Sprintf(tmpl, "A", _EMPTY_, _EMPTY_))) 4950 s1, o1 := RunServerWithConfig(conf1) 4951 defer s1.Shutdown() 4952 4953 routes := fmt.Sprintf("routes: [\"nats://127.0.0.1:%d\"]", o1.Cluster.Port) 4954 conf2 := createConfFile(t, []byte(fmt.Sprintf(tmpl, "B", _EMPTY_, routes))) 4955 s2, _ := RunServerWithConfig(conf2) 4956 defer s2.Shutdown() 4957 4958 checkClusterFormed(t, s1, s2) 4959 4960 nc2 := natsConnect(t, s2.ClientURL(), nats.UserInfo("a", "pwd")) 4961 defer nc2.Close() 4962 natsSub(t, nc2, "foo", func(_ *nats.Msg) {}) 4963 natsFlush(t, nc2) 4964 4965 nc1 := natsConnect(t, s1.ClientURL(), nats.UserInfo("a", "pwd")) 4966 defer nc1.Close() 4967 sub := natsSubSync(t, nc1, "acc.dest") 4968 natsFlush(t, nc1) 4969 4970 checkSubInterest(t, s1, "A", "foo", time.Second) 4971 checkSubInterest(t, s2, "A", "acc.dest", time.Second) 4972 4973 for iter, test := range []struct { 4974 name string 4975 samplingStr string 4976 sampling int 4977 }{ 4978 // Sampling is considered 100% if not specified or <=0 or >= 100. 4979 // To disable sampling, the account destination should not be specified. 4980 {"no sampling specified", _EMPTY_, 100}, 4981 {"sampling specified", ", sampling: \"25%\"", 25}, 4982 {"no sampling again", _EMPTY_, 100}, 4983 } { 4984 t.Run(test.name, func(t *testing.T) { 4985 if iter > 0 { 4986 reloadUpdateConfig(t, s1, conf1, fmt.Sprintf(tmpl, "A", test.samplingStr, _EMPTY_)) 4987 reloadUpdateConfig(t, s2, conf2, fmt.Sprintf(tmpl, "B", test.samplingStr, routes)) 4988 } 4989 msg := nats.NewMsg("foo") 4990 msg.Header.Set(traceParentHdr, "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01") 4991 total := 400 4992 for i := 0; i < total; i++ { 4993 err := nc1.PublishMsg(msg) 4994 require_NoError(t, err) 4995 } 4996 // Wait a bit to make sure that we received all traces that should 4997 // have been received. 
4998 time.Sleep(500 * time.Millisecond) 4999 n, _, err := sub.Pending() 5000 require_NoError(t, err) 5001 fromClient := 0 5002 fromRoute := 0 5003 for i := 0; i < n; i++ { 5004 msg = natsNexMsg(t, sub, time.Second) 5005 var e MsgTraceEvent 5006 err = json.Unmarshal(msg.Data, &e) 5007 require_NoError(t, err) 5008 ingress := e.Ingress() 5009 require_True(t, ingress != nil) 5010 switch ingress.Kind { 5011 case CLIENT: 5012 fromClient++ 5013 case ROUTER: 5014 fromRoute++ 5015 default: 5016 t.Fatalf("Unexpected ingress: %+v", ingress) 5017 } 5018 } 5019 // There should be as many messages coming from the origin server 5020 // and the routed server. This checks that if sampling is not 100% 5021 // then when a message is routed, the header is properly deactivated. 5022 require_Equal[int](t, fromClient, fromRoute) 5023 // Now check that if sampling was 100%, we have the total number 5024 // of published messages. 5025 if test.sampling == 100 { 5026 require_Equal[int](t, fromClient, total) 5027 } else { 5028 // Otherwise, we should have no more (but let's be conservative) 5029 // than the sampling number. 5030 require_LessThan[int](t, fromClient, int(float64(test.sampling*total/100)*1.35)) 5031 } 5032 }) 5033 } 5034 } 5035 5036 func TestMsgTraceAccDestWithSamplingJWTUpdate(t *testing.T) { 5037 // create system account 5038 sysKp, _ := nkeys.CreateAccount() 5039 sysPub, _ := sysKp.PublicKey() 5040 sysCreds := newUser(t, sysKp) 5041 // create account A 5042 akp, _ := nkeys.CreateAccount() 5043 aPub, _ := akp.PublicKey() 5044 claim := jwt.NewAccountClaims(aPub) 5045 claim.Trace = &jwt.MsgTrace{Destination: "acc.trace.dest"} 5046 aJwt, err := claim.Encode(oKp) 5047 require_NoError(t, err) 5048 5049 dir := t.TempDir() 5050 conf := createConfFile(t, []byte(fmt.Sprintf(` 5051 listen: -1 5052 operator: %s 5053 resolver: { 5054 type: full 5055 dir: '%s' 5056 } 5057 system_account: %s 5058 `, ojwt, dir, sysPub))) 5059 s, _ := RunServerWithConfig(conf) 5060 defer s.Shutdown() 5061 updateJwt(t, s.ClientURL(), sysCreds, aJwt, 1) 5062 5063 nc := natsConnect(t, s.ClientURL(), createUserCreds(t, nil, akp)) 5064 defer nc.Close() 5065 5066 sub := natsSubSync(t, nc, "acc.trace.dest") 5067 natsFlush(t, nc) 5068 5069 for iter, test := range []struct { 5070 name string 5071 sampling int 5072 }{ 5073 {"no sampling specified", 100}, 5074 {"sampling", 25}, 5075 {"set back sampling to 0", 100}, 5076 } { 5077 t.Run(test.name, func(t *testing.T) { 5078 if iter > 0 { 5079 claim.Trace = &jwt.MsgTrace{Destination: "acc.trace.dest", Sampling: test.sampling} 5080 aJwt, err = claim.Encode(oKp) 5081 require_NoError(t, err) 5082 updateJwt(t, s.ClientURL(), sysCreds, aJwt, 1) 5083 } 5084 5085 msg := nats.NewMsg("foo") 5086 msg.Header.Set(traceParentHdr, "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01") 5087 msg.Data = []byte("hello") 5088 5089 total := 400 5090 for i := 0; i < total; i++ { 5091 err := nc.PublishMsg(msg) 5092 require_NoError(t, err) 5093 } 5094 // Wait a bit to make sure that we received all traces that should 5095 // have been received. 5096 time.Sleep(500 * time.Millisecond) 5097 n, _, err := sub.Pending() 5098 require_NoError(t, err) 5099 for i := 0; i < n; i++ { 5100 msg = natsNexMsg(t, sub, time.Second) 5101 var e MsgTraceEvent 5102 err = json.Unmarshal(msg.Data, &e) 5103 require_NoError(t, err) 5104 } 5105 // Now check that if sampling was 100%, we have the total number 5106 // of published messages. 
5107 if test.sampling == 100 { 5108 require_Equal[int](t, n, total) 5109 } else { 5110 // Otherwise, we should have no more (but let's be conservative) 5111 // than the sampling number. 5112 require_LessThan[int](t, n, int(float64(test.sampling*total/100)*1.35)) 5113 } 5114 }) 5115 } 5116 }
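// The sketch below is illustrative only and is not invoked by the test
// suite (the function name is ours, for illustration): it summarizes, using
// the same helpers exercised throughout this file (RunServer, natsConnect,
// natsSubSync, natsFlush, natsNexMsg, require_NoError), how a client
// triggers a message trace and decodes the resulting MsgTraceEvent.
func exampleMsgTraceRoundTrip(t *testing.T) {
	o := DefaultOptions()
	s := RunServer(o)
	defer s.Shutdown()

	nc := natsConnect(t, s.ClientURL(), nats.Name("Tracer"))
	defer nc.Close()

	// Subscription that will receive the trace events.
	traceSub := natsSubSync(t, nc, "my.trace.subj")
	natsFlush(t, nc)

	// Ask the server to trace this message. With Nats-Trace-Only set, the
	// message itself is not delivered to application subscribers; only the
	// trace events are emitted.
	msg := nats.NewMsg("foo")
	msg.Header.Set(MsgTraceDest, traceSub.Subject)
	msg.Header.Set(MsgTraceOnly, "true")
	msg.Data = []byte("hello")
	require_NoError(t, nc.PublishMsg(msg))

	// Each server that handled the message emits one MsgTraceEvent to the
	// trace destination.
	traceMsg := natsNexMsg(t, traceSub, time.Second)
	var e MsgTraceEvent
	require_NoError(t, json.Unmarshal(traceMsg.Data, &e))

	// The event describes where the message entered this server (ingress)
	// and where it went next (egresses), if anywhere.
	if ingress := e.Ingress(); ingress != nil {
		t.Logf("ingress kind=%v name=%q subject=%q", ingress.Kind, ingress.Name, ingress.Subject)
	}
	for _, eg := range e.Egresses() {
		t.Logf("egress kind=%v name=%q subscription=%q", eg.Kind, eg.Name, eg.Subscription)
	}
}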