github.com/matrixorigin/matrixone@v1.2.0/pkg/proxy/router_test.go

// Copyright 2021 - 2023 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package proxy

import (
	"context"
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/lni/goutils/leaktest"
	"github.com/matrixorigin/matrixone/pkg/clusterservice"
	"github.com/matrixorigin/matrixone/pkg/common/runtime"
	"github.com/matrixorigin/matrixone/pkg/common/stopper"
	"github.com/matrixorigin/matrixone/pkg/pb/metadata"
	"github.com/stretchr/testify/require"
)

// TestCNServer checks that connecting to a CN server fails when nothing
// listens on the address and succeeds against a running test CN server.
func TestCNServer(t *testing.T) {
	defer leaktest.AfterTest(t)()
	temp := os.TempDir()

	t.Run("error", func(t *testing.T) {
		addr := fmt.Sprintf("%s/%d.sock", temp, time.Now().Nanosecond())
		require.NoError(t, os.RemoveAll(addr))
		cn := testMakeCNServer("", addr, 0, "", labelInfo{})
		c, err := cn.Connect(nil, 0)
		require.Error(t, err)
		require.Nil(t, c)
	})

	t.Run("success", func(t *testing.T) {
		ctx, cancel := context.WithCancel(context.TODO())
		defer cancel()
		addr := fmt.Sprintf("%s/%d.sock", temp, time.Now().Nanosecond())
		require.NoError(t, os.RemoveAll(addr))
		stopFn := startTestCNServer(t, ctx, addr, nil)
		defer func() {
			require.NoError(t, stopFn())
		}()
		cn := testMakeCNServer("", addr, 0, "", labelInfo{})
		c, err := cn.Connect(nil, 0)
		require.NoError(t, err)
		require.NotNil(t, c)
	})
}

// TestRouter_SelectEmptyCN checks that a client carrying labels can still be
// routed when the only CN server has no labels at all.
func TestRouter_SelectEmptyCN(t *testing.T) {
	defer leaktest.AfterTest(t)()

	rt := runtime.DefaultRuntime()
	runtime.SetupProcessLevelRuntime(rt)
	logger := rt.Logger()
	st := stopper.NewStopper("test-proxy", stopper.WithLogger(rt.Logger().RawLogger()))
	defer st.Stop()
	hc := &mockHAKeeperClient{}
	hc.updateCN("cn1", "", map[string]metadata.LabelList{})

	mc := clusterservice.NewMOCluster(hc, 3*time.Second)
	defer mc.Close()
	rt.SetGlobalVariables(runtime.ClusterService, mc)
	mc.ForceRefresh(true)
	re := testRebalancer(t, st, logger, mc)

	ru := newRouter(mc, re, true)

	li1 := labelInfo{
		Tenant: "t1",
		Labels: map[string]string{
			"k1": "v1",
		},
	}
	cn, err := ru.Route(context.TODO(), clientInfo{labelInfo: li1}, nil)
	require.NoError(t, err)
	require.NotNil(t, cn)
}

// TestRouter_RouteForCommon checks routing for common (non-sys) tenants with
// various tenant and label combinations.
func TestRouter_RouteForCommon(t *testing.T) {
	defer leaktest.AfterTest(t)()

	rt := runtime.DefaultRuntime()
	runtime.SetupProcessLevelRuntime(rt)
	logger := rt.Logger()
	st := stopper.NewStopper("test-proxy", stopper.WithLogger(rt.Logger().RawLogger()))
	defer st.Stop()
	hc := &mockHAKeeperClient{}
	// Construct backend CN servers.
	hc.updateCN("cn1", "", map[string]metadata.LabelList{
		tenantLabelKey: {Labels: []string{"t1"}},
		"k1":           {Labels: []string{"v1"}},
		"k2":           {Labels: []string{"v2"}},
	})

	mc := clusterservice.NewMOCluster(hc, 3*time.Second)
	defer mc.Close()
	rt.SetGlobalVariables(runtime.ClusterService, mc)
	mc.ForceRefresh(true)
	re := testRebalancer(t, st, logger, mc)

	ru := newRouter(mc, re, true)
	ctx := context.TODO()

	li1 := labelInfo{
		Tenant: "t1",
		Labels: map[string]string{
			"k1": "v1",
			"k2": "v2",
		},
	}
	cn, err := ru.Route(ctx, clientInfo{labelInfo: li1}, nil)
	require.NoError(t, err)
	require.NotNil(t, cn)

	// The tenant does not match any CN server, so routing fails.
	li2 := labelInfo{
		Tenant: "t2",
		Labels: map[string]string{
			"k1": "v1",
		},
	}
	cn, err = ru.Route(ctx, clientInfo{labelInfo: li2}, nil)
	require.Error(t, err)
	require.Nil(t, cn)

	// The label value does not match, so routing fails.
	li3 := labelInfo{
		Tenant: "t1",
		Labels: map[string]string{
			"k2": "v1",
		},
	}
	cn, err = ru.Route(ctx, clientInfo{labelInfo: li3}, nil)
	require.Error(t, err)
	require.Nil(t, cn)

	// empty tenant means sys tenant.
	li4 := labelInfo{
		Tenant: "",
		Labels: map[string]string{
			"k2": "v1",
		},
	}
	cn, err = ru.Route(ctx, clientInfo{labelInfo: li4, username: "dump"}, nil)
	require.NoError(t, err)
	require.NotNil(t, cn)

	cn, err = ru.Route(ctx, clientInfo{labelInfo: li4}, nil)
	require.Error(t, err)
	require.Nil(t, cn)

	li5 := labelInfo{
		Tenant: "sys",
		Labels: map[string]string{
			"k2": "v1",
		},
	}
	cn, err = ru.Route(ctx, clientInfo{labelInfo: li5, username: "dump"}, nil)
	require.NoError(t, err)
	require.NotNil(t, cn)

	cn, err = ru.Route(ctx, clientInfo{labelInfo: li5}, nil)
	require.Error(t, err)
	require.Nil(t, cn)
}

// TestRouter_RouteForSys checks routing for the sys tenant as CN servers with
// different label sets join the cluster.
func TestRouter_RouteForSys(t *testing.T) {
	defer leaktest.AfterTest(t)()

	rt := runtime.DefaultRuntime()
	runtime.SetupProcessLevelRuntime(rt)
	logger := rt.Logger()
	st := stopper.NewStopper("test-proxy", stopper.WithLogger(rt.Logger().RawLogger()))
	defer st.Stop()
	hc := &mockHAKeeperClient{}
	mc := clusterservice.NewMOCluster(hc, 3*time.Second)
	defer mc.Close()
	rt.SetGlobalVariables(runtime.ClusterService, mc)
	re := testRebalancer(t, st, logger, mc)
	ru := newRouter(mc, re, true)
	li1 := labelInfo{
		Tenant: "sys",
	}

	hc.updateCN("cn1", "", map[string]metadata.LabelList{
		tenantLabelKey: {Labels: []string{"t1"}},
	})
	mc.ForceRefresh(true)
	ctx := context.TODO()
	cn, err := ru.Route(ctx, clientInfo{labelInfo: li1, username: "dump"}, nil)
	require.NoError(t, err)
	require.NotNil(t, cn)
	require.Equal(t, "cn1", cn.uuid)

	cn, err = ru.Route(ctx, clientInfo{labelInfo: li1}, nil)
	require.Error(t, err)
	require.Nil(t, cn)

	hc.updateCN("cn2", "", map[string]metadata.LabelList{})
	mc.ForceRefresh(true)
	cn, err = ru.Route(ctx, clientInfo{labelInfo: li1}, nil)
	require.NoError(t, err)
	require.NotNil(t, cn)
	require.Equal(t, "cn2", cn.uuid)

	hc.updateCN("cn3", "", map[string]metadata.LabelList{
		"k1": {Labels: []string{"v1"}},
	})
	mc.ForceRefresh(true)
	cn, err = ru.Route(ctx, clientInfo{labelInfo: li1}, nil)
	require.NoError(t, err)
	require.NotNil(t, cn)
	require.Equal(t, "cn3", cn.uuid)

	hc.updateCN("cn4", "", map[string]metadata.LabelList{
		tenantLabelKey: {Labels: []string{"sys"}},
	})
	mc.ForceRefresh(true)
	cn, err = ru.Route(ctx, clientInfo{labelInfo: li1}, nil)
	require.NoError(t, err)
	require.NotNil(t, cn)
	require.Equal(t, "cn4", cn.uuid)
}

// TestRouter_SelectByConnID checks that a connected CN server can be looked up
// again by its connection ID.
func TestRouter_SelectByConnID(t *testing.T) {
	defer leaktest.AfterTest(t)()

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	runtime.SetupProcessLevelRuntime(runtime.DefaultRuntime())
	rt := runtime.DefaultRuntime()
	logger := rt.Logger()
	st := stopper.NewStopper("test-proxy", stopper.WithLogger(rt.Logger().RawLogger()))
	defer st.Stop()
	re := testRebalancer(t, st, logger, nil)

	temp := os.TempDir()
	addr1 := fmt.Sprintf("%s/%d.sock", temp, time.Now().Nanosecond())
	require.NoError(t, os.RemoveAll(addr1))
	stopFn1 := startTestCNServer(t, ctx, addr1, nil)
	defer func() {
		require.NoError(t, stopFn1())
	}()
	ru := newRouter(nil, re, true)

	cn1 := testMakeCNServer("uuid1", addr1, 10, "", labelInfo{})
	_, _, err := ru.Connect(cn1, testPacket, nil)
	require.NoError(t, err)

	cn2, err := ru.SelectByConnID(10)
	require.NoError(t, err)
	require.NotNil(t, cn2)
	require.Equal(t, cn1.uuid, cn2.uuid)
	require.Equal(t, cn1.addr, cn2.addr)

	cn3, err := ru.SelectByConnID(20)
	require.Error(t, err)
	require.Nil(t, cn3)
}

// TestRouter_ConnectAndSelectBalanced checks that connections with identical
// labels are balanced across all matching CN servers.
func TestRouter_ConnectAndSelectBalanced(t *testing.T) {
	defer leaktest.AfterTest(t)()

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	rt := runtime.DefaultRuntime()
	runtime.SetupProcessLevelRuntime(rt)
	logger := rt.Logger()
	st := stopper.NewStopper("test-proxy", stopper.WithLogger(rt.Logger().RawLogger()))
	defer st.Stop()
	hc := &mockHAKeeperClient{}
	// Construct backend CN servers.
	temp := os.TempDir()
	addr1 := fmt.Sprintf("%s/%d.sock", temp, time.Now().Nanosecond())
	require.NoError(t, os.RemoveAll(addr1))
	hc.updateCN("cn1", addr1, map[string]metadata.LabelList{
		tenantLabelKey: {Labels: []string{"t1"}},
		"k1":           {Labels: []string{"v1"}},
		"k2":           {Labels: []string{"v2"}},
	})
	stopFn1 := startTestCNServer(t, ctx, addr1, nil)
	defer func() {
		require.NoError(t, stopFn1())
	}()

	addr2 := fmt.Sprintf("%s/%d.sock", temp, time.Now().Nanosecond())
	require.NoError(t, os.RemoveAll(addr2))
	hc.updateCN("cn2", addr2, map[string]metadata.LabelList{
		tenantLabelKey: {Labels: []string{"t1"}},
		"k1":           {Labels: []string{"v1"}},
		"k2":           {Labels: []string{"v2"}},
	})
	stopFn2 := startTestCNServer(t, ctx, addr2, nil)
	defer func() {
		require.NoError(t, stopFn2())
	}()

	addr3 := fmt.Sprintf("%s/%d.sock", temp, time.Now().Nanosecond())
	require.NoError(t, os.RemoveAll(addr3))
	hc.updateCN("cn3", addr3, map[string]metadata.LabelList{
		tenantLabelKey: {Labels: []string{"t1"}},
		"k1":           {Labels: []string{"v1"}},
		"k2":           {Labels: []string{"v2"}},
	})
	stopFn3 := startTestCNServer(t, ctx, addr3, nil)
	defer func() {
		require.NoError(t, stopFn3())
	}()

	mc := clusterservice.NewMOCluster(hc, 3*time.Second)
	defer mc.Close()
	rt.SetGlobalVariables(runtime.ClusterService, mc)
	mc.ForceRefresh(true)
	re := testRebalancer(t, st, logger, mc)

	ru := newRouter(mc, re, true)

	connResult := make(map[string]struct{})
	li1 := labelInfo{
		Tenant: "t1",
		Labels: map[string]string{
			"k1": "v1",
			"k2": "v2",
		},
	}
	cn, err := ru.Route(ctx, clientInfo{labelInfo: li1}, nil)
	require.NoError(t, err)
	require.NotNil(t, cn)
	cn.addr = "unix://" + cn.addr
	cn.salt = testSlat
	tu1 := newTunnel(context.TODO(), logger, nil)
	_, _, err = ru.Connect(cn, testPacket, tu1)
	require.NoError(t, err)
	connResult[cn.uuid] = struct{}{}

	li2 := labelInfo{
		Tenant: "t1",
		Labels: map[string]string{
			"k1": "v1",
			"k2": "v2",
		},
	}
	cn, err = ru.Route(ctx, clientInfo{labelInfo: li2}, nil)
	require.NoError(t, err)
	require.NotNil(t, cn)
	cn.addr = "unix://" + cn.addr
	cn.salt = testSlat
	tu2 := newTunnel(context.TODO(), logger, nil)
	_, _, err = ru.Connect(cn, testPacket, tu2)
	require.NoError(t, err)
	connResult[cn.uuid] = struct{}{}

	li3 := labelInfo{
		Tenant: "t1",
		Labels: map[string]string{
			"k1": "v1",
			"k2": "v2",
		},
	}
	cn, err = ru.Route(ctx, clientInfo{labelInfo: li3}, nil)
	require.NoError(t, err)
	require.NotNil(t, cn)
	cn.addr = "unix://" + cn.addr
	cn.salt = testSlat
	tu3 := newTunnel(context.TODO(), logger, nil)
	_, _, err = ru.Connect(cn, testPacket, tu3)
	require.NoError(t, err)
	connResult[cn.uuid] = struct{}{}

	// The three connections should have landed on three different CN servers.
	require.Equal(t, 3, len(connResult))
}

// TestRouter_ConnectAndSelectSpecify checks that connections for a client
// specifying a narrower label set end up on only two of the three CN servers.
func TestRouter_ConnectAndSelectSpecify(t *testing.T) {
	defer leaktest.AfterTest(t)()

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	rt := runtime.DefaultRuntime()
	runtime.SetupProcessLevelRuntime(rt)
	logger := rt.Logger()
	st := stopper.NewStopper("test-proxy", stopper.WithLogger(rt.Logger().RawLogger()))
	defer st.Stop()
	hc := &mockHAKeeperClient{}
	// Construct backend CN servers.
	temp := os.TempDir()
	addr1 := fmt.Sprintf("%s/%d.sock", temp, time.Now().Nanosecond())
	require.NoError(t, os.RemoveAll(addr1))
	hc.updateCN("cn1", addr1, map[string]metadata.LabelList{
		tenantLabelKey: {Labels: []string{"t1"}},
		"k1":           {Labels: []string{"v1"}},
		"k2":           {Labels: []string{"v2"}},
	})
	stopFn1 := startTestCNServer(t, ctx, addr1, nil)
	defer func() {
		require.NoError(t, stopFn1())
	}()

	addr2 := fmt.Sprintf("%s/%d.sock", temp, time.Now().Nanosecond())
	require.NoError(t, os.RemoveAll(addr2))
	hc.updateCN("cn2", addr2, map[string]metadata.LabelList{
		tenantLabelKey: {Labels: []string{"t1"}},
		"k2":           {Labels: []string{"v2"}},
	})
	stopFn2 := startTestCNServer(t, ctx, addr2, nil)
	defer func() {
		require.NoError(t, stopFn2())
	}()

	addr3 := fmt.Sprintf("%s/%d.sock", temp, time.Now().Nanosecond())
	require.NoError(t, os.RemoveAll(addr3))
	hc.updateCN("cn3", addr3, map[string]metadata.LabelList{
		tenantLabelKey: {Labels: []string{"t1"}},
		"k2":           {Labels: []string{"v2"}},
	})
	stopFn3 := startTestCNServer(t, ctx, addr3, nil)
	defer func() {
		require.NoError(t, stopFn3())
	}()

	mc := clusterservice.NewMOCluster(hc, 3*time.Second)
	defer mc.Close()
	rt.SetGlobalVariables(runtime.ClusterService, mc)
	mc.ForceRefresh(true)
	re := testRebalancer(t, st, logger, mc)

	ru := newRouter(mc, re, true)

	connResult := make(map[string]struct{})
	li1 := labelInfo{
		Tenant: "t1",
		Labels: map[string]string{
			"k2": "v2",
		},
	}
	cn, err := ru.Route(ctx, clientInfo{labelInfo: li1}, nil)
	require.NoError(t, err)
	require.NotNil(t, cn)
	cn.addr = "unix://" + cn.addr
	cn.salt = testSlat
	tu1 := newTunnel(context.TODO(), logger, nil)
	_, _, err = ru.Connect(cn, testPacket, tu1)
	require.NoError(t, err)
	connResult[cn.uuid] = struct{}{}

	li2 := labelInfo{
		Tenant: "t1",
		Labels: map[string]string{
			"k2": "v2",
		},
	}
	cn, err = ru.Route(ctx, clientInfo{labelInfo: li2}, nil)
	require.NoError(t, err)
	require.NotNil(t, cn)
	cn.addr = "unix://" + cn.addr
	cn.salt = testSlat
	tu2 := newTunnel(context.TODO(), logger, nil)
	_, _, err = ru.Connect(cn, testPacket, tu2)
	require.NoError(t, err)
	connResult[cn.uuid] = struct{}{}

	li3 := labelInfo{
		Tenant: "t1",
		Labels: map[string]string{
			"k2": "v2",
		},
	}
	cn, err = ru.Route(ctx, clientInfo{labelInfo: li3}, nil)
	require.NoError(t, err)
	require.NotNil(t, cn)
	cn.addr = "unix://" + cn.addr
	cn.salt = testSlat
	tu3 := newTunnel(context.TODO(), logger, nil)
	_, _, err = ru.Connect(cn, testPacket, tu3)
	require.NoError(t, err)
	connResult[cn.uuid] = struct{}{}

	// The three connections should have landed on only two distinct CN servers.
	require.Equal(t, 2, len(connResult))
}

// TestRouter_Filter checks that CN servers rejected by the filter argument are
// skipped during routing.
func TestRouter_Filter(t *testing.T) {
	defer leaktest.AfterTest(t)()

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	rt := runtime.DefaultRuntime()
	runtime.SetupProcessLevelRuntime(rt)
	logger := rt.Logger()
	st := stopper.NewStopper("test-proxy", stopper.WithLogger(rt.Logger().RawLogger()))
	defer st.Stop()
	hc := &mockHAKeeperClient{}
	// Construct backend CN servers.
	temp := os.TempDir()
	addr1 := fmt.Sprintf("%s/%d.sock", temp, time.Now().Nanosecond())
	require.NoError(t, os.RemoveAll(addr1))
	hc.updateCN("cn1", addr1, map[string]metadata.LabelList{
		tenantLabelKey: {Labels: []string{"t1"}},
		"k1":           {Labels: []string{"v1"}},
		"k2":           {Labels: []string{"v2"}},
	})
	stopFn1 := startTestCNServer(t, ctx, addr1, nil)
	defer func() {
		require.NoError(t, stopFn1())
	}()

	addr2 := fmt.Sprintf("%s/%d.sock", temp, time.Now().Nanosecond())
	require.NoError(t, os.RemoveAll(addr2))
	hc.updateCN("cn2", addr2, map[string]metadata.LabelList{
		tenantLabelKey: {Labels: []string{"t1"}},
		"k2":           {Labels: []string{"v2"}},
	})
	stopFn2 := startTestCNServer(t, ctx, addr2, nil)
	defer func() {
		require.NoError(t, stopFn2())
	}()

	mc := clusterservice.NewMOCluster(hc, 3*time.Second)
	defer mc.Close()
	rt.SetGlobalVariables(runtime.ClusterService, mc)
	mc.ForceRefresh(true)
	re := testRebalancer(t, st, logger, mc)

	ru := newRouter(mc, re, true)

	// The filter rejects addr1, so the router must pick cn2.
	cn, err := ru.Route(ctx, clientInfo{username: "dump"}, func(s string) bool {
		return s == addr1
	})
	require.NoError(t, err)
	require.NotNil(t, cn)
	require.Equal(t, cn.uuid, "cn2")
}

// TestRouter_RetryableConnect checks that failures to connect to an
// unreachable or unresponsive CN server are reported as retryable errors.
func TestRouter_RetryableConnect(t *testing.T) {
	defer leaktest.AfterTest(t)()

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	rt := runtime.DefaultRuntime()
	runtime.SetupProcessLevelRuntime(rt)
	logger := rt.Logger()
	st := stopper.NewStopper("test-proxy", stopper.WithLogger(rt.Logger().RawLogger()))
	defer st.Stop()
	hc := &mockHAKeeperClient{}
	// Construct backend CN servers.
	temp := os.TempDir()
	addr1 := fmt.Sprintf("%s/%d.sock", temp, time.Now().Nanosecond())
	require.NoError(t, os.RemoveAll(addr1))
	hc.updateCN("cn1", addr1, map[string]metadata.LabelList{
		tenantLabelKey: {Labels: []string{"t1"}},
	})
	stopFn1 := startTestCNServer(t, ctx, addr1, nil)
	defer func() {
		require.NoError(t, stopFn1())
	}()

	addr2 := fmt.Sprintf("%s/%d.sock", temp, time.Now().Nanosecond())
	require.NoError(t, os.RemoveAll(addr2))
	hc.updateCN("cn2", addr2, map[string]metadata.LabelList{
		tenantLabelKey: {Labels: []string{"t1"}},
	})
	// We do NOT start cn2 here, so connecting to it returns a retryable error.

	addr3 := fmt.Sprintf("%s/%d.sock", temp, time.Now().Nanosecond())
	require.NoError(t, os.RemoveAll(addr3))
	hc.updateCN("cn3", addr3, map[string]metadata.LabelList{
		tenantLabelKey: {Labels: []string{"t1"}},
	})
	// cn3 is started, but delays handling connections so that connecting to it
	// times out.
	stopFn3 := startTestCNServer(t, ctx, addr3, nil, withBeforeHandle(func() {
		time.Sleep(defaultAuthTimeout/3 + time.Second)
	}))
	defer func() {
		require.NoError(t, stopFn3())
	}()

	mc := clusterservice.NewMOCluster(hc, 3*time.Second)
	defer mc.Close()
	rt.SetGlobalVariables(runtime.ClusterService, mc)
	mc.ForceRefresh(true)
	re := testRebalancer(t, st, logger, mc)

	ru := newRouter(mc, re, false)

	li1 := labelInfo{
		Tenant: "t1",
	}
	cn, err := ru.Route(ctx, clientInfo{labelInfo: li1}, func(s string) bool {
		// choose cn2
		return s == addr1 || s == addr3
	})
	require.NoError(t, err)
	require.NotNil(t, cn)
	cn.addr = "unix://" + cn.addr
	cn.salt = testSlat
	tu1 := newTunnel(context.TODO(), logger, nil)
	_, _, err = ru.Connect(cn, testPacket, tu1)
	require.True(t, isRetryableErr(err))

	cn, err = ru.Route(ctx, clientInfo{labelInfo: li1}, func(s string) bool {
		// choose cn1
		return s == addr2 || s == addr3
	})
	require.NoError(t, err)
	require.NotNil(t, cn)
	cn.addr = "unix://" + cn.addr
	cn.salt = testSlat
	_, _, err = ru.Connect(cn, testPacket, tu1)
	require.NoError(t, err)
	require.Equal(t, "cn1", cn.uuid)

	// Could not connect to cn3 because of the timeout.
	cn, err = ru.Route(ctx, clientInfo{labelInfo: li1}, func(s string) bool {
		// choose cn3
		return s == addr1 || s == addr2
	})
	require.NoError(t, err)
	require.NotNil(t, cn)
	cn.addr = "unix://" + cn.addr
	cn.salt = testSlat
	tu3 := newTunnel(context.TODO(), logger, nil)
	_, _, err = ru.Connect(cn, testPacket, tu3)
	require.True(t, isRetryableErr(err))
}