github.com/outbrain/consul@v1.4.5/agent/agent_endpoint_test.go (about) 1 package agent 2 3 import ( 4 "bytes" 5 "crypto/tls" 6 "crypto/x509" 7 "encoding/json" 8 "fmt" 9 "io" 10 "io/ioutil" 11 "net/http" 12 "net/http/httptest" 13 "os" 14 "reflect" 15 "strings" 16 "testing" 17 "time" 18 19 "github.com/hashicorp/consul/acl" 20 "github.com/hashicorp/consul/agent/checks" 21 "github.com/hashicorp/consul/agent/config" 22 "github.com/hashicorp/consul/agent/connect" 23 "github.com/hashicorp/consul/agent/debug" 24 "github.com/hashicorp/consul/agent/local" 25 "github.com/hashicorp/consul/agent/structs" 26 tokenStore "github.com/hashicorp/consul/agent/token" 27 "github.com/hashicorp/consul/api" 28 "github.com/hashicorp/consul/lib" 29 "github.com/hashicorp/consul/logger" 30 "github.com/hashicorp/consul/testrpc" 31 "github.com/hashicorp/consul/testutil/retry" 32 "github.com/hashicorp/consul/types" 33 "github.com/hashicorp/go-uuid" 34 "github.com/hashicorp/serf/serf" 35 "github.com/mitchellh/copystructure" 36 "github.com/stretchr/testify/assert" 37 "github.com/stretchr/testify/require" 38 ) 39 40 func makeReadOnlyAgentACL(t *testing.T, srv *HTTPServer) string { 41 args := map[string]interface{}{ 42 "Name": "User Token", 43 "Type": "client", 44 "Rules": `agent "" { policy = "read" }`, 45 } 46 req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args)) 47 resp := httptest.NewRecorder() 48 obj, err := srv.ACLCreate(resp, req) 49 if err != nil { 50 t.Fatalf("err: %v", err) 51 } 52 aclResp := obj.(aclCreateResponse) 53 return aclResp.ID 54 } 55 56 func TestAgent_Services(t *testing.T) { 57 t.Parallel() 58 a := NewTestAgent(t, t.Name(), "") 59 defer a.Shutdown() 60 61 testrpc.WaitForTestAgent(t, a.RPC, "dc1") 62 srv1 := &structs.NodeService{ 63 ID: "mysql", 64 Service: "mysql", 65 Tags: []string{"master"}, 66 Meta: map[string]string{ 67 "foo": "bar", 68 }, 69 Port: 5000, 70 } 71 require.NoError(t, a.State.AddService(srv1, "")) 72 73 // Add a managed proxy for that 
service 74 prxy1 := &structs.ConnectManagedProxy{ 75 ExecMode: structs.ProxyExecModeScript, 76 Command: []string{"proxy.sh"}, 77 Config: map[string]interface{}{ 78 "bind_port": 1234, 79 "foo": "bar", 80 }, 81 TargetServiceID: "mysql", 82 Upstreams: structs.TestUpstreams(t), 83 } 84 _, err := a.State.AddProxy(prxy1, "", "") 85 require.NoError(t, err) 86 87 req, _ := http.NewRequest("GET", "/v1/agent/services", nil) 88 obj, err := a.srv.AgentServices(nil, req) 89 if err != nil { 90 t.Fatalf("Err: %v", err) 91 } 92 val := obj.(map[string]*api.AgentService) 93 assert.Lenf(t, val, 1, "bad services: %v", obj) 94 assert.Equal(t, 5000, val["mysql"].Port) 95 assert.Equal(t, srv1.Meta, val["mysql"].Meta) 96 require.NotNil(t, val["mysql"].Connect) 97 require.NotNil(t, val["mysql"].Connect.Proxy) 98 assert.Equal(t, prxy1.ExecMode.String(), string(val["mysql"].Connect.Proxy.ExecMode)) 99 assert.Equal(t, prxy1.Command, val["mysql"].Connect.Proxy.Command) 100 assert.Equal(t, prxy1.Config, val["mysql"].Connect.Proxy.Config) 101 assert.Equal(t, prxy1.Upstreams.ToAPI(), val["mysql"].Connect.Proxy.Upstreams) 102 } 103 104 // This tests that the agent services endpoint (/v1/agent/services) returns 105 // Connect proxies. 
106 func TestAgent_Services_ExternalConnectProxy(t *testing.T) { 107 t.Parallel() 108 109 assert := assert.New(t) 110 a := NewTestAgent(t, t.Name(), "") 111 defer a.Shutdown() 112 113 testrpc.WaitForTestAgent(t, a.RPC, "dc1") 114 srv1 := &structs.NodeService{ 115 Kind: structs.ServiceKindConnectProxy, 116 ID: "db-proxy", 117 Service: "db-proxy", 118 Port: 5000, 119 Proxy: structs.ConnectProxyConfig{ 120 DestinationServiceName: "db", 121 Upstreams: structs.TestUpstreams(t), 122 }, 123 } 124 a.State.AddService(srv1, "") 125 126 req, _ := http.NewRequest("GET", "/v1/agent/services", nil) 127 obj, err := a.srv.AgentServices(nil, req) 128 assert.Nil(err) 129 val := obj.(map[string]*api.AgentService) 130 assert.Len(val, 1) 131 actual := val["db-proxy"] 132 assert.Equal(api.ServiceKindConnectProxy, actual.Kind) 133 assert.Equal(srv1.Proxy.ToAPI(), actual.Proxy) 134 135 // DEPRECATED (ProxyDestination) - remove the next comment and assertion 136 // Should still have deprecated ProxyDestination filled in until we remove it 137 // completely at a major version bump. 138 assert.Equal(srv1.Proxy.DestinationServiceName, actual.ProxyDestination) 139 } 140 141 // Thie tests that a sidecar-registered service is returned as expected. 142 func TestAgent_Services_Sidecar(t *testing.T) { 143 t.Parallel() 144 145 require := require.New(t) 146 assert := assert.New(t) 147 a := NewTestAgent(t, t.Name(), "") 148 defer a.Shutdown() 149 150 testrpc.WaitForLeader(t, a.RPC, "dc1") 151 srv1 := &structs.NodeService{ 152 Kind: structs.ServiceKindConnectProxy, 153 ID: "db-sidecar-proxy", 154 Service: "db-sidecar-proxy", 155 Port: 5000, 156 // Set this internal state that we expect sidecar registrations to have. 
157 LocallyRegisteredAsSidecar: true, 158 Proxy: structs.ConnectProxyConfig{ 159 DestinationServiceName: "db", 160 Upstreams: structs.TestUpstreams(t), 161 }, 162 } 163 a.State.AddService(srv1, "") 164 165 req, _ := http.NewRequest("GET", "/v1/agent/services", nil) 166 obj, err := a.srv.AgentServices(nil, req) 167 require.NoError(err) 168 val := obj.(map[string]*api.AgentService) 169 assert.Len(val, 1) 170 actual := val["db-sidecar-proxy"] 171 require.NotNil(actual) 172 assert.Equal(api.ServiceKindConnectProxy, actual.Kind) 173 assert.Equal(srv1.Proxy.ToAPI(), actual.Proxy) 174 175 // DEPRECATED (ProxyDestination) - remove the next comment and assertion 176 // Should still have deprecated ProxyDestination filled in until we remove it 177 // completely at a major version bump. 178 assert.Equal(srv1.Proxy.DestinationServiceName, actual.ProxyDestination) 179 180 // Sanity check that LocalRegisteredAsSidecar is not in the output (assuming 181 // JSON encoding). Right now this is not the case because the services 182 // endpoint happens to use the api struct which doesn't include that field, 183 // but this test serves as a regression test incase we change the endpoint to 184 // return the internal struct later and accidentally expose some "internal" 185 // state. 
186 output, err := json.Marshal(obj) 187 require.NoError(err) 188 assert.NotContains(string(output), "LocallyRegisteredAsSidecar") 189 assert.NotContains(string(output), "locally_registered_as_sidecar") 190 } 191 192 func TestAgent_Services_ACLFilter(t *testing.T) { 193 t.Parallel() 194 a := NewTestAgent(t, t.Name(), TestACLConfig()) 195 defer a.Shutdown() 196 197 testrpc.WaitForLeader(t, a.RPC, "dc1") 198 srv1 := &structs.NodeService{ 199 ID: "mysql", 200 Service: "mysql", 201 Tags: []string{"master"}, 202 Port: 5000, 203 } 204 a.State.AddService(srv1, "") 205 206 t.Run("no token", func(t *testing.T) { 207 req, _ := http.NewRequest("GET", "/v1/agent/services", nil) 208 obj, err := a.srv.AgentServices(nil, req) 209 if err != nil { 210 t.Fatalf("Err: %v", err) 211 } 212 val := obj.(map[string]*api.AgentService) 213 if len(val) != 0 { 214 t.Fatalf("bad: %v", obj) 215 } 216 }) 217 218 t.Run("root token", func(t *testing.T) { 219 req, _ := http.NewRequest("GET", "/v1/agent/services?token=root", nil) 220 obj, err := a.srv.AgentServices(nil, req) 221 if err != nil { 222 t.Fatalf("Err: %v", err) 223 } 224 val := obj.(map[string]*api.AgentService) 225 if len(val) != 1 { 226 t.Fatalf("bad: %v", obj) 227 } 228 }) 229 } 230 231 func TestAgent_Service(t *testing.T) { 232 t.Parallel() 233 234 a := NewTestAgent(t, t.Name(), TestACLConfig()+` 235 services { 236 name = "web" 237 port = 8181 238 } 239 `) 240 defer a.Shutdown() 241 testrpc.WaitForLeader(t, a.RPC, "dc1") 242 243 proxy := structs.TestConnectProxyConfig(t) 244 proxy.DestinationServiceID = "web1" 245 246 // Define a valid local sidecar proxy service 247 sidecarProxy := &structs.ServiceDefinition{ 248 Kind: structs.ServiceKindConnectProxy, 249 Name: "web-sidecar-proxy", 250 Check: structs.CheckType{ 251 TCP: "127.0.0.1:8000", 252 Interval: 10 * time.Second, 253 }, 254 Port: 8000, 255 Proxy: &proxy, 256 Weights: &structs.Weights{ 257 Passing: 1, 258 Warning: 1, 259 }, 260 } 261 262 // Define an updated version. 
Be careful to copy it. 263 updatedProxy := *sidecarProxy 264 updatedProxy.Port = 9999 265 266 // Mangle the proxy config/upstreams into the expected for with defaults and 267 // API struct types. 268 expectProxy := proxy 269 expectProxy.Upstreams = 270 structs.TestAddDefaultsToUpstreams(t, sidecarProxy.Proxy.Upstreams) 271 272 expectedResponse := &api.AgentService{ 273 Kind: api.ServiceKindConnectProxy, 274 ID: "web-sidecar-proxy", 275 Service: "web-sidecar-proxy", 276 Port: 8000, 277 Proxy: expectProxy.ToAPI(), 278 ContentHash: "3442362e971c43d1", 279 Weights: api.AgentWeights{ 280 Passing: 1, 281 Warning: 1, 282 }, 283 } 284 285 // Copy and modify 286 updatedResponse := *expectedResponse 287 updatedResponse.Port = 9999 288 updatedResponse.ContentHash = "90b5c19bf0f5073" 289 290 // Simple response for non-proxy service registered in TestAgent config 291 expectWebResponse := &api.AgentService{ 292 ID: "web", 293 Service: "web", 294 Port: 8181, 295 ContentHash: "69351c1ac865b034", 296 Weights: api.AgentWeights{ 297 Passing: 1, 298 Warning: 1, 299 }, 300 } 301 302 tests := []struct { 303 name string 304 tokenRules string 305 url string 306 updateFunc func() 307 wantWait time.Duration 308 wantCode int 309 wantErr string 310 wantResp *api.AgentService 311 }{ 312 { 313 name: "simple fetch - proxy", 314 url: "/v1/agent/service/web-sidecar-proxy", 315 wantCode: 200, 316 wantResp: expectedResponse, 317 }, 318 { 319 name: "simple fetch - non-proxy", 320 url: "/v1/agent/service/web", 321 wantCode: 200, 322 wantResp: expectWebResponse, 323 }, 324 { 325 name: "blocking fetch timeout, no change", 326 url: "/v1/agent/service/web-sidecar-proxy?hash=" + expectedResponse.ContentHash + "&wait=100ms", 327 wantWait: 100 * time.Millisecond, 328 wantCode: 200, 329 wantResp: expectedResponse, 330 }, 331 { 332 name: "blocking fetch old hash should return immediately", 333 url: "/v1/agent/service/web-sidecar-proxy?hash=123456789abcd&wait=10m", 334 wantCode: 200, 335 wantResp: 
expectedResponse, 336 }, 337 { 338 name: "blocking fetch returns change", 339 url: "/v1/agent/service/web-sidecar-proxy?hash=" + expectedResponse.ContentHash, 340 updateFunc: func() { 341 time.Sleep(100 * time.Millisecond) 342 // Re-register with new proxy config, make sure we copy the struct so we 343 // don't alter it and affect later test cases. 344 req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(updatedProxy)) 345 resp := httptest.NewRecorder() 346 _, err := a.srv.AgentRegisterService(resp, req) 347 require.NoError(t, err) 348 require.Equal(t, 200, resp.Code, "body: %s", resp.Body.String()) 349 }, 350 wantWait: 100 * time.Millisecond, 351 wantCode: 200, 352 wantResp: &updatedResponse, 353 }, 354 { 355 // This test exercises a case that caused a busy loop to eat CPU for the 356 // entire duration of the blocking query. If a service gets re-registered 357 // wth same proxy config then the old proxy config chan is closed causing 358 // blocked watchset.Watch to return false indicating a change. But since 359 // the hash is the same when the blocking fn is re-called we should just 360 // keep blocking on the next iteration. The bug hit was that the WatchSet 361 // ws was not being reset in the loop and so when you try to `Watch` it 362 // the second time it just returns immediately making the blocking loop 363 // into a busy-poll! 364 // 365 // This test though doesn't catch that because busy poll still has the 366 // correct external behavior. I don't want to instrument the loop to 367 // assert it's not executing too fast here as I can't think of a clean way 368 // and the issue is fixed now so this test doesn't actually catch the 369 // error, but does provide an easy way to verify the behavior by hand: 370 // 1. Make this test fail e.g. change wantErr to true 371 // 2. Add a log.Println or similar into the blocking loop/function 372 // 3. See whether it's called just once or many times in a tight loop. 
373 name: "blocking fetch interrupted with no change (same hash)", 374 url: "/v1/agent/service/web-sidecar-proxy?wait=200ms&hash=" + expectedResponse.ContentHash, 375 updateFunc: func() { 376 time.Sleep(100 * time.Millisecond) 377 // Re-register with _same_ proxy config 378 req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(sidecarProxy)) 379 resp := httptest.NewRecorder() 380 _, err := a.srv.AgentRegisterService(resp, req) 381 require.NoError(t, err) 382 require.Equal(t, 200, resp.Code, "body: %s", resp.Body.String()) 383 }, 384 wantWait: 200 * time.Millisecond, 385 wantCode: 200, 386 wantResp: expectedResponse, 387 }, 388 { 389 // When we reload config, the agent pauses Anti-entropy, then clears all 390 // services (which causes their watch chans to be closed) before loading 391 // state from config/snapshot again). If we do that naively then we don't 392 // just get a spurios wakeup on the watch if the service didn't change, 393 // but we get it wakeup and then race with the reload and probably see no 394 // services and return a 404 error which is gross. This test excercises 395 // that - even though the registrations were from API not config, they are 396 // persisted and cleared/reloaded from snapshot which has same effect. 397 // 398 // The fix for this test is to allow the same mechanism that pauses 399 // Anti-entropy during reload to also pause the hash blocking loop so we 400 // don't resume until the state is reloaded and we get a chance to see if 401 // it actually changed or not. 
402 name: "blocking fetch interrupted by reload shouldn't 404 - no change", 403 url: "/v1/agent/service/web-sidecar-proxy?wait=200ms&hash=" + expectedResponse.ContentHash, 404 updateFunc: func() { 405 time.Sleep(100 * time.Millisecond) 406 // Reload 407 require.NoError(t, a.ReloadConfig(a.Config)) 408 }, 409 // Should eventually timeout since there is no actual change 410 wantWait: 200 * time.Millisecond, 411 wantCode: 200, 412 wantResp: expectedResponse, 413 }, 414 { 415 // As above but test actually altering the service with the config reload. 416 // This simulates the API registration being overridden by a different one 417 // on disk during reload. 418 name: "blocking fetch interrupted by reload shouldn't 404 - changes", 419 url: "/v1/agent/service/web-sidecar-proxy?wait=10m&hash=" + expectedResponse.ContentHash, 420 updateFunc: func() { 421 time.Sleep(100 * time.Millisecond) 422 // Reload 423 newConfig := *a.Config 424 newConfig.Services = append(newConfig.Services, &updatedProxy) 425 require.NoError(t, a.ReloadConfig(&newConfig)) 426 }, 427 wantWait: 100 * time.Millisecond, 428 wantCode: 200, 429 wantResp: &updatedResponse, 430 }, 431 { 432 name: "err: non-existent proxy", 433 url: "/v1/agent/service/nope", 434 wantCode: 404, 435 }, 436 { 437 name: "err: bad ACL for service", 438 url: "/v1/agent/service/web-sidecar-proxy", 439 // Limited token doesn't grant read to the service 440 tokenRules: ` 441 key "" { 442 policy = "read" 443 } 444 `, 445 // Note that because we return ErrPermissionDenied and handle writing 446 // status at a higher level helper this actually gets a 200 in this test 447 // case so just assert that it was an error. 
448 wantErr: "Permission denied", 449 }, 450 { 451 name: "good ACL for service", 452 url: "/v1/agent/service/web-sidecar-proxy", 453 // Limited token doesn't grant read to the service 454 tokenRules: ` 455 service "web-sidecar-proxy" { 456 policy = "read" 457 } 458 `, 459 wantCode: 200, 460 wantResp: expectedResponse, 461 }, 462 } 463 464 for _, tt := range tests { 465 t.Run(tt.name, func(t *testing.T) { 466 assert := assert.New(t) 467 require := require.New(t) 468 469 // Register the basic service to ensure it's in a known state to start. 470 { 471 req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(sidecarProxy)) 472 resp := httptest.NewRecorder() 473 _, err := a.srv.AgentRegisterService(resp, req) 474 require.NoError(err) 475 require.Equal(200, resp.Code, "body: %s", resp.Body.String()) 476 } 477 478 req, _ := http.NewRequest("GET", tt.url, nil) 479 480 // Inject the root token for tests that don't care about ACL 481 var token = "root" 482 if tt.tokenRules != "" { 483 // Create new token and use that. 
484 token = testCreateToken(t, a, tt.tokenRules) 485 } 486 req.Header.Set("X-Consul-Token", token) 487 resp := httptest.NewRecorder() 488 if tt.updateFunc != nil { 489 go tt.updateFunc() 490 } 491 start := time.Now() 492 obj, err := a.srv.AgentService(resp, req) 493 elapsed := time.Now().Sub(start) 494 495 if tt.wantErr != "" { 496 require.Error(err) 497 require.Contains(strings.ToLower(err.Error()), strings.ToLower(tt.wantErr)) 498 } else { 499 require.NoError(err) 500 } 501 if tt.wantCode != 0 { 502 require.Equal(tt.wantCode, resp.Code, "body: %s", resp.Body.String()) 503 } 504 if tt.wantWait != 0 { 505 assert.True(elapsed >= tt.wantWait, "should have waited at least %s, "+ 506 "took %s", tt.wantWait, elapsed) 507 } else { 508 assert.True(elapsed < 10*time.Millisecond, "should not have waited, "+ 509 "took %s", elapsed) 510 } 511 512 if tt.wantResp != nil { 513 assert.Equal(tt.wantResp, obj) 514 assert.Equal(tt.wantResp.ContentHash, resp.Header().Get("X-Consul-ContentHash")) 515 } else { 516 // Janky but Equal doesn't help here because nil != 517 // *api.AgentService((*api.AgentService)(nil)) 518 assert.Nil(obj) 519 } 520 }) 521 } 522 } 523 524 // DEPRECATED(managed-proxies) - remove this In the interim, we need the newer 525 // /agent/service/service to work for managed proxies so we can swithc the built 526 // in proxy to use only that without breaking managed proxies early. 
527 func TestAgent_Service_DeprecatedManagedProxy(t *testing.T) { 528 t.Parallel() 529 a := NewTestAgent(t, t.Name(), ` 530 connect { 531 proxy { 532 allow_managed_api_registration = true 533 } 534 } 535 `) 536 defer a.Shutdown() 537 538 testrpc.WaitForLeader(t, a.RPC, "dc1") 539 540 svc := &structs.ServiceDefinition{ 541 Name: "web", 542 Port: 8000, 543 Check: structs.CheckType{ 544 TTL: 10 * time.Second, 545 }, 546 Connect: &structs.ServiceConnect{ 547 Proxy: &structs.ServiceDefinitionConnectProxy{ 548 // Fix the command otherwise the executable path ends up being random 549 // temp dir in every test run so the ContentHash will never match. 550 Command: []string{"foo"}, 551 Config: map[string]interface{}{ 552 "foo": "bar", 553 "bind_address": "10.10.10.10", 554 "bind_port": 9999, // make this deterministic 555 }, 556 Upstreams: structs.TestUpstreams(t), 557 }, 558 }, 559 } 560 561 require := require.New(t) 562 563 rr := httptest.NewRecorder() 564 565 req, _ := http.NewRequest("POST", "/v1/agent/services/register", jsonReader(svc)) 566 _, err := a.srv.AgentRegisterService(rr, req) 567 require.NoError(err) 568 require.Equal(200, rr.Code, "body:\n"+rr.Body.String()) 569 570 rr = httptest.NewRecorder() 571 req, _ = http.NewRequest("GET", "/v1/agent/service/web-proxy", nil) 572 obj, err := a.srv.AgentService(rr, req) 573 require.NoError(err) 574 require.Equal(200, rr.Code, "body:\n"+rr.Body.String()) 575 576 gotService, ok := obj.(*api.AgentService) 577 require.True(ok) 578 579 expect := &api.AgentService{ 580 Kind: api.ServiceKindConnectProxy, 581 ID: "web-proxy", 582 Service: "web-proxy", 583 Port: 9999, 584 Address: "10.10.10.10", 585 ContentHash: "e24f099e42e88317", 586 Proxy: &api.AgentServiceConnectProxyConfig{ 587 DestinationServiceID: "web", 588 DestinationServiceName: "web", 589 LocalServiceAddress: "127.0.0.1", 590 LocalServicePort: 8000, 591 Config: map[string]interface{}{ 592 "foo": "bar", 593 "bind_port": 9999, 594 "bind_address": "10.10.10.10", 595 
"local_service_address": "127.0.0.1:8000", 596 }, 597 Upstreams: structs.TestAddDefaultsToUpstreams(t, svc.Connect.Proxy.Upstreams).ToAPI(), 598 }, 599 } 600 601 require.Equal(expect, gotService) 602 } 603 604 func TestAgent_Checks(t *testing.T) { 605 t.Parallel() 606 a := NewTestAgent(t, t.Name(), "") 607 defer a.Shutdown() 608 609 testrpc.WaitForTestAgent(t, a.RPC, "dc1") 610 chk1 := &structs.HealthCheck{ 611 Node: a.Config.NodeName, 612 CheckID: "mysql", 613 Name: "mysql", 614 Status: api.HealthPassing, 615 } 616 a.State.AddCheck(chk1, "") 617 618 req, _ := http.NewRequest("GET", "/v1/agent/checks", nil) 619 obj, err := a.srv.AgentChecks(nil, req) 620 if err != nil { 621 t.Fatalf("Err: %v", err) 622 } 623 val := obj.(map[types.CheckID]*structs.HealthCheck) 624 if len(val) != 1 { 625 t.Fatalf("bad checks: %v", obj) 626 } 627 if val["mysql"].Status != api.HealthPassing { 628 t.Fatalf("bad check: %v", obj) 629 } 630 } 631 632 func TestAgent_HealthServiceByID(t *testing.T) { 633 t.Parallel() 634 a := NewTestAgent(t, t.Name(), "") 635 defer a.Shutdown() 636 testrpc.WaitForTestAgent(t, a.RPC, "dc1") 637 638 service := &structs.NodeService{ 639 ID: "mysql", 640 Service: "mysql", 641 } 642 if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil { 643 t.Fatalf("err: %v", err) 644 } 645 service = &structs.NodeService{ 646 ID: "mysql2", 647 Service: "mysql2", 648 } 649 if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil { 650 t.Fatalf("err: %v", err) 651 } 652 service = &structs.NodeService{ 653 ID: "mysql3", 654 Service: "mysql3", 655 } 656 if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil { 657 t.Fatalf("err: %v", err) 658 } 659 660 chk1 := &structs.HealthCheck{ 661 Node: a.Config.NodeName, 662 CheckID: "mysql", 663 Name: "mysql", 664 ServiceID: "mysql", 665 Status: api.HealthPassing, 666 } 667 err := a.State.AddCheck(chk1, "") 668 if err != nil { 669 t.Fatalf("Err: %v", err) 670 } 671 672 
chk2 := &structs.HealthCheck{ 673 Node: a.Config.NodeName, 674 CheckID: "mysql", 675 Name: "mysql", 676 ServiceID: "mysql", 677 Status: api.HealthPassing, 678 } 679 err = a.State.AddCheck(chk2, "") 680 if err != nil { 681 t.Fatalf("Err: %v", err) 682 } 683 684 chk3 := &structs.HealthCheck{ 685 Node: a.Config.NodeName, 686 CheckID: "mysql2", 687 Name: "mysql2", 688 ServiceID: "mysql2", 689 Status: api.HealthPassing, 690 } 691 err = a.State.AddCheck(chk3, "") 692 if err != nil { 693 t.Fatalf("Err: %v", err) 694 } 695 696 chk4 := &structs.HealthCheck{ 697 Node: a.Config.NodeName, 698 CheckID: "mysql2", 699 Name: "mysql2", 700 ServiceID: "mysql2", 701 Status: api.HealthWarning, 702 } 703 err = a.State.AddCheck(chk4, "") 704 if err != nil { 705 t.Fatalf("Err: %v", err) 706 } 707 708 chk5 := &structs.HealthCheck{ 709 Node: a.Config.NodeName, 710 CheckID: "mysql3", 711 Name: "mysql3", 712 ServiceID: "mysql3", 713 Status: api.HealthMaint, 714 } 715 err = a.State.AddCheck(chk5, "") 716 if err != nil { 717 t.Fatalf("Err: %v", err) 718 } 719 720 chk6 := &structs.HealthCheck{ 721 Node: a.Config.NodeName, 722 CheckID: "mysql3", 723 Name: "mysql3", 724 ServiceID: "mysql3", 725 Status: api.HealthCritical, 726 } 727 err = a.State.AddCheck(chk6, "") 728 if err != nil { 729 t.Fatalf("Err: %v", err) 730 } 731 732 eval := func(t *testing.T, url string, expectedCode int, expected string) { 733 t.Helper() 734 t.Run("format=text", func(t *testing.T) { 735 t.Helper() 736 req, _ := http.NewRequest("GET", url+"?format=text", nil) 737 resp := httptest.NewRecorder() 738 data, err := a.srv.AgentHealthServiceByID(resp, req) 739 codeWithPayload, ok := err.(CodeWithPayloadError) 740 if !ok { 741 t.Fatalf("Err: %v", err) 742 } 743 if got, want := codeWithPayload.StatusCode, expectedCode; got != want { 744 t.Fatalf("returned bad status: expected %d, but had: %d in %#v", expectedCode, codeWithPayload.StatusCode, codeWithPayload) 745 } 746 body, ok := data.(string) 747 if !ok { 748 t.Fatalf("Cannot 
get result as string in := %#v", data) 749 } 750 if got, want := body, expected; got != want { 751 t.Fatalf("got body %q want %q", got, want) 752 } 753 if got, want := codeWithPayload.Reason, expected; got != want { 754 t.Fatalf("got body %q want %q", got, want) 755 } 756 }) 757 t.Run("format=json", func(t *testing.T) { 758 req, _ := http.NewRequest("GET", url, nil) 759 resp := httptest.NewRecorder() 760 dataRaw, err := a.srv.AgentHealthServiceByID(resp, req) 761 codeWithPayload, ok := err.(CodeWithPayloadError) 762 if !ok { 763 t.Fatalf("Err: %v", err) 764 } 765 if got, want := codeWithPayload.StatusCode, expectedCode; got != want { 766 t.Fatalf("returned bad status: expected %d, but had: %d in %#v", expectedCode, codeWithPayload.StatusCode, codeWithPayload) 767 } 768 data, ok := dataRaw.(*api.AgentServiceChecksInfo) 769 if !ok { 770 t.Fatalf("Cannot connvert result to JSON: %#v", dataRaw) 771 } 772 if codeWithPayload.StatusCode != http.StatusNotFound { 773 if data != nil && data.AggregatedStatus != expected { 774 t.Fatalf("got body %v want %v", data, expected) 775 } 776 } 777 }) 778 } 779 780 t.Run("passing checks", func(t *testing.T) { 781 eval(t, "/v1/agent/health/service/id/mysql", http.StatusOK, "passing") 782 }) 783 t.Run("warning checks", func(t *testing.T) { 784 eval(t, "/v1/agent/health/service/id/mysql2", http.StatusTooManyRequests, "warning") 785 }) 786 t.Run("critical checks", func(t *testing.T) { 787 eval(t, "/v1/agent/health/service/id/mysql3", http.StatusServiceUnavailable, "critical") 788 }) 789 t.Run("unknown serviceid", func(t *testing.T) { 790 eval(t, "/v1/agent/health/service/id/mysql1", http.StatusNotFound, "ServiceId mysql1 not found") 791 }) 792 793 nodeCheck := &structs.HealthCheck{ 794 Node: a.Config.NodeName, 795 CheckID: "diskCheck", 796 Name: "diskCheck", 797 Status: api.HealthCritical, 798 } 799 err = a.State.AddCheck(nodeCheck, "") 800 801 if err != nil { 802 t.Fatalf("Err: %v", err) 803 } 804 t.Run("critical check on node", func(t 
*testing.T) { 805 eval(t, "/v1/agent/health/service/id/mysql", http.StatusServiceUnavailable, "critical") 806 }) 807 808 err = a.State.RemoveCheck(nodeCheck.CheckID) 809 if err != nil { 810 t.Fatalf("Err: %v", err) 811 } 812 nodeCheck = &structs.HealthCheck{ 813 Node: a.Config.NodeName, 814 CheckID: "_node_maintenance", 815 Name: "_node_maintenance", 816 Status: api.HealthMaint, 817 } 818 err = a.State.AddCheck(nodeCheck, "") 819 if err != nil { 820 t.Fatalf("Err: %v", err) 821 } 822 t.Run("maintenance check on node", func(t *testing.T) { 823 eval(t, "/v1/agent/health/service/id/mysql", http.StatusServiceUnavailable, "maintenance") 824 }) 825 } 826 827 func TestAgent_HealthServiceByName(t *testing.T) { 828 t.Parallel() 829 a := NewTestAgent(t, t.Name(), "") 830 defer a.Shutdown() 831 832 service := &structs.NodeService{ 833 ID: "mysql1", 834 Service: "mysql-pool-r", 835 } 836 if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil { 837 t.Fatalf("err: %v", err) 838 } 839 service = &structs.NodeService{ 840 ID: "mysql2", 841 Service: "mysql-pool-r", 842 } 843 if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil { 844 t.Fatalf("err: %v", err) 845 } 846 service = &structs.NodeService{ 847 ID: "mysql3", 848 Service: "mysql-pool-rw", 849 } 850 if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil { 851 t.Fatalf("err: %v", err) 852 } 853 service = &structs.NodeService{ 854 ID: "mysql4", 855 Service: "mysql-pool-rw", 856 } 857 if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil { 858 t.Fatalf("err: %v", err) 859 } 860 service = &structs.NodeService{ 861 ID: "httpd1", 862 Service: "httpd", 863 } 864 if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil { 865 t.Fatalf("err: %v", err) 866 } 867 service = &structs.NodeService{ 868 ID: "httpd2", 869 Service: "httpd", 870 } 871 if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != 
nil { 872 t.Fatalf("err: %v", err) 873 } 874 875 chk1 := &structs.HealthCheck{ 876 Node: a.Config.NodeName, 877 CheckID: "mysql1", 878 Name: "mysql1", 879 ServiceID: "mysql1", 880 ServiceName: "mysql-pool-r", 881 Status: api.HealthPassing, 882 } 883 err := a.State.AddCheck(chk1, "") 884 if err != nil { 885 t.Fatalf("Err: %v", err) 886 } 887 888 chk2 := &structs.HealthCheck{ 889 Node: a.Config.NodeName, 890 CheckID: "mysql1", 891 Name: "mysql1", 892 ServiceID: "mysql1", 893 ServiceName: "mysql-pool-r", 894 Status: api.HealthWarning, 895 } 896 err = a.State.AddCheck(chk2, "") 897 if err != nil { 898 t.Fatalf("Err: %v", err) 899 } 900 901 chk3 := &structs.HealthCheck{ 902 Node: a.Config.NodeName, 903 CheckID: "mysql2", 904 Name: "mysql2", 905 ServiceID: "mysql2", 906 ServiceName: "mysql-pool-r", 907 Status: api.HealthPassing, 908 } 909 err = a.State.AddCheck(chk3, "") 910 if err != nil { 911 t.Fatalf("Err: %v", err) 912 } 913 914 chk4 := &structs.HealthCheck{ 915 Node: a.Config.NodeName, 916 CheckID: "mysql2", 917 Name: "mysql2", 918 ServiceID: "mysql2", 919 ServiceName: "mysql-pool-r", 920 Status: api.HealthCritical, 921 } 922 err = a.State.AddCheck(chk4, "") 923 if err != nil { 924 t.Fatalf("Err: %v", err) 925 } 926 927 chk5 := &structs.HealthCheck{ 928 Node: a.Config.NodeName, 929 CheckID: "mysql3", 930 Name: "mysql3", 931 ServiceID: "mysql3", 932 ServiceName: "mysql-pool-rw", 933 Status: api.HealthWarning, 934 } 935 err = a.State.AddCheck(chk5, "") 936 if err != nil { 937 t.Fatalf("Err: %v", err) 938 } 939 940 chk6 := &structs.HealthCheck{ 941 Node: a.Config.NodeName, 942 CheckID: "mysql4", 943 Name: "mysql4", 944 ServiceID: "mysql4", 945 ServiceName: "mysql-pool-rw", 946 Status: api.HealthPassing, 947 } 948 err = a.State.AddCheck(chk6, "") 949 if err != nil { 950 t.Fatalf("Err: %v", err) 951 } 952 953 chk7 := &structs.HealthCheck{ 954 Node: a.Config.NodeName, 955 CheckID: "httpd1", 956 Name: "httpd1", 957 ServiceID: "httpd1", 958 ServiceName: "httpd", 959 Status: 
api.HealthPassing, 960 } 961 err = a.State.AddCheck(chk7, "") 962 if err != nil { 963 t.Fatalf("Err: %v", err) 964 } 965 966 chk8 := &structs.HealthCheck{ 967 Node: a.Config.NodeName, 968 CheckID: "httpd2", 969 Name: "httpd2", 970 ServiceID: "httpd2", 971 ServiceName: "httpd", 972 Status: api.HealthPassing, 973 } 974 err = a.State.AddCheck(chk8, "") 975 if err != nil { 976 t.Fatalf("Err: %v", err) 977 } 978 979 eval := func(t *testing.T, url string, expectedCode int, expected string) { 980 t.Helper() 981 t.Run("format=text", func(t *testing.T) { 982 t.Helper() 983 req, _ := http.NewRequest("GET", url+"?format=text", nil) 984 resp := httptest.NewRecorder() 985 data, err := a.srv.AgentHealthServiceByName(resp, req) 986 codeWithPayload, ok := err.(CodeWithPayloadError) 987 if !ok { 988 t.Fatalf("Err: %v", err) 989 } 990 if got, want := codeWithPayload.StatusCode, expectedCode; got != want { 991 t.Fatalf("returned bad status: %d. Body: %q", resp.Code, resp.Body.String()) 992 } 993 if got, want := codeWithPayload.Reason, expected; got != want { 994 t.Fatalf("got reason %q want %q", got, want) 995 } 996 if got, want := data, expected; got != want { 997 t.Fatalf("got body %q want %q", got, want) 998 } 999 }) 1000 t.Run("format=json", func(t *testing.T) { 1001 t.Helper() 1002 req, _ := http.NewRequest("GET", url, nil) 1003 resp := httptest.NewRecorder() 1004 dataRaw, err := a.srv.AgentHealthServiceByName(resp, req) 1005 codeWithPayload, ok := err.(CodeWithPayloadError) 1006 if !ok { 1007 t.Fatalf("Err: %v", err) 1008 } 1009 data, ok := dataRaw.([]api.AgentServiceChecksInfo) 1010 if !ok { 1011 t.Fatalf("Cannot connvert result to JSON") 1012 } 1013 if got, want := codeWithPayload.StatusCode, expectedCode; got != want { 1014 t.Fatalf("returned bad code: %d. 
Body: %#v", resp.Code, data) 1015 } 1016 if resp.Code != http.StatusNotFound { 1017 if codeWithPayload.Reason != expected { 1018 t.Fatalf("got wrong status %#v want %#v", codeWithPayload, expected) 1019 } 1020 } 1021 }) 1022 } 1023 1024 t.Run("passing checks", func(t *testing.T) { 1025 eval(t, "/v1/agent/health/service/name/httpd", http.StatusOK, "passing") 1026 }) 1027 t.Run("warning checks", func(t *testing.T) { 1028 eval(t, "/v1/agent/health/service/name/mysql-pool-rw", http.StatusTooManyRequests, "warning") 1029 }) 1030 t.Run("critical checks", func(t *testing.T) { 1031 eval(t, "/v1/agent/health/service/name/mysql-pool-r", http.StatusServiceUnavailable, "critical") 1032 }) 1033 t.Run("unknown serviceName", func(t *testing.T) { 1034 eval(t, "/v1/agent/health/service/name/test", http.StatusNotFound, "ServiceName test Not Found") 1035 }) 1036 nodeCheck := &structs.HealthCheck{ 1037 Node: a.Config.NodeName, 1038 CheckID: "diskCheck", 1039 Name: "diskCheck", 1040 Status: api.HealthCritical, 1041 } 1042 err = a.State.AddCheck(nodeCheck, "") 1043 1044 if err != nil { 1045 t.Fatalf("Err: %v", err) 1046 } 1047 t.Run("critical check on node", func(t *testing.T) { 1048 eval(t, "/v1/agent/health/service/name/mysql-pool-r", http.StatusServiceUnavailable, "critical") 1049 }) 1050 1051 err = a.State.RemoveCheck(nodeCheck.CheckID) 1052 if err != nil { 1053 t.Fatalf("Err: %v", err) 1054 } 1055 nodeCheck = &structs.HealthCheck{ 1056 Node: a.Config.NodeName, 1057 CheckID: "_node_maintenance", 1058 Name: "_node_maintenance", 1059 Status: api.HealthMaint, 1060 } 1061 err = a.State.AddCheck(nodeCheck, "") 1062 if err != nil { 1063 t.Fatalf("Err: %v", err) 1064 } 1065 t.Run("maintenance check on node", func(t *testing.T) { 1066 eval(t, "/v1/agent/health/service/name/mysql-pool-r", http.StatusServiceUnavailable, "maintenance") 1067 }) 1068 } 1069 1070 func TestAgent_Checks_ACLFilter(t *testing.T) { 1071 t.Parallel() 1072 a := NewTestAgent(t, t.Name(), TestACLConfig()) 1073 defer 
a.Shutdown() 1074 1075 testrpc.WaitForLeader(t, a.RPC, "dc1") 1076 chk1 := &structs.HealthCheck{ 1077 Node: a.Config.NodeName, 1078 CheckID: "mysql", 1079 Name: "mysql", 1080 Status: api.HealthPassing, 1081 } 1082 a.State.AddCheck(chk1, "") 1083 1084 t.Run("no token", func(t *testing.T) { 1085 req, _ := http.NewRequest("GET", "/v1/agent/checks", nil) 1086 obj, err := a.srv.AgentChecks(nil, req) 1087 if err != nil { 1088 t.Fatalf("Err: %v", err) 1089 } 1090 val := obj.(map[types.CheckID]*structs.HealthCheck) 1091 if len(val) != 0 { 1092 t.Fatalf("bad checks: %v", obj) 1093 } 1094 }) 1095 1096 t.Run("root token", func(t *testing.T) { 1097 req, _ := http.NewRequest("GET", "/v1/agent/checks?token=root", nil) 1098 obj, err := a.srv.AgentChecks(nil, req) 1099 if err != nil { 1100 t.Fatalf("Err: %v", err) 1101 } 1102 val := obj.(map[types.CheckID]*structs.HealthCheck) 1103 if len(val) != 1 { 1104 t.Fatalf("bad checks: %v", obj) 1105 } 1106 }) 1107 } 1108 1109 func TestAgent_Self(t *testing.T) { 1110 t.Parallel() 1111 a := NewTestAgent(t, t.Name(), ` 1112 node_meta { 1113 somekey = "somevalue" 1114 } 1115 `) 1116 defer a.Shutdown() 1117 1118 testrpc.WaitForTestAgent(t, a.RPC, "dc1") 1119 req, _ := http.NewRequest("GET", "/v1/agent/self", nil) 1120 obj, err := a.srv.AgentSelf(nil, req) 1121 if err != nil { 1122 t.Fatalf("err: %v", err) 1123 } 1124 1125 val := obj.(Self) 1126 if int(val.Member.Port) != a.Config.SerfPortLAN { 1127 t.Fatalf("incorrect port: %v", obj) 1128 } 1129 1130 if val.DebugConfig["SerfPortLAN"].(int) != a.Config.SerfPortLAN { 1131 t.Fatalf("incorrect port: %v", obj) 1132 } 1133 1134 cs, err := a.GetLANCoordinate() 1135 if err != nil { 1136 t.Fatalf("err: %v", err) 1137 } 1138 if c := cs[a.config.SegmentName]; !reflect.DeepEqual(c, val.Coord) { 1139 t.Fatalf("coordinates are not equal: %v != %v", c, val.Coord) 1140 } 1141 delete(val.Meta, structs.MetaSegmentKey) // Added later, not in config. 
1142 if !reflect.DeepEqual(a.config.NodeMeta, val.Meta) { 1143 t.Fatalf("meta fields are not equal: %v != %v", a.config.NodeMeta, val.Meta) 1144 } 1145 } 1146 1147 func TestAgent_Self_ACLDeny(t *testing.T) { 1148 t.Parallel() 1149 a := NewTestAgent(t, t.Name(), TestACLConfig()) 1150 defer a.Shutdown() 1151 1152 testrpc.WaitForLeader(t, a.RPC, "dc1") 1153 t.Run("no token", func(t *testing.T) { 1154 req, _ := http.NewRequest("GET", "/v1/agent/self", nil) 1155 if _, err := a.srv.AgentSelf(nil, req); !acl.IsErrPermissionDenied(err) { 1156 t.Fatalf("err: %v", err) 1157 } 1158 }) 1159 1160 t.Run("agent master token", func(t *testing.T) { 1161 req, _ := http.NewRequest("GET", "/v1/agent/self?token=towel", nil) 1162 if _, err := a.srv.AgentSelf(nil, req); err != nil { 1163 t.Fatalf("err: %v", err) 1164 } 1165 }) 1166 1167 t.Run("read-only token", func(t *testing.T) { 1168 ro := makeReadOnlyAgentACL(t, a.srv) 1169 req, _ := http.NewRequest("GET", fmt.Sprintf("/v1/agent/self?token=%s", ro), nil) 1170 if _, err := a.srv.AgentSelf(nil, req); err != nil { 1171 t.Fatalf("err: %v", err) 1172 } 1173 }) 1174 } 1175 1176 func TestAgent_Metrics_ACLDeny(t *testing.T) { 1177 t.Parallel() 1178 a := NewTestAgent(t, t.Name(), TestACLConfig()) 1179 defer a.Shutdown() 1180 1181 testrpc.WaitForLeader(t, a.RPC, "dc1") 1182 t.Run("no token", func(t *testing.T) { 1183 req, _ := http.NewRequest("GET", "/v1/agent/metrics", nil) 1184 if _, err := a.srv.AgentMetrics(nil, req); !acl.IsErrPermissionDenied(err) { 1185 t.Fatalf("err: %v", err) 1186 } 1187 }) 1188 1189 t.Run("agent master token", func(t *testing.T) { 1190 req, _ := http.NewRequest("GET", "/v1/agent/metrics?token=towel", nil) 1191 if _, err := a.srv.AgentMetrics(nil, req); err != nil { 1192 t.Fatalf("err: %v", err) 1193 } 1194 }) 1195 1196 t.Run("read-only token", func(t *testing.T) { 1197 ro := makeReadOnlyAgentACL(t, a.srv) 1198 req, _ := http.NewRequest("GET", fmt.Sprintf("/v1/agent/metrics?token=%s", ro), nil) 1199 if _, err := 
a.srv.AgentMetrics(nil, req); err != nil { 1200 t.Fatalf("err: %v", err) 1201 } 1202 }) 1203 } 1204 1205 func TestAgent_Reload(t *testing.T) { 1206 t.Parallel() 1207 dc1 := "dc1" 1208 a := NewTestAgent(t, t.Name(), ` 1209 acl_enforce_version_8 = false 1210 services = [ 1211 { 1212 name = "redis" 1213 } 1214 ] 1215 watches = [ 1216 { 1217 datacenter = "`+dc1+`" 1218 type = "key" 1219 key = "test" 1220 handler = "true" 1221 } 1222 ] 1223 limits = { 1224 rpc_rate=1 1225 rpc_max_burst=100 1226 } 1227 `) 1228 defer a.Shutdown() 1229 1230 testrpc.WaitForTestAgent(t, a.RPC, dc1) 1231 if a.State.Service("redis") == nil { 1232 t.Fatal("missing redis service") 1233 } 1234 1235 cfg2 := TestConfig(config.Source{ 1236 Name: "reload", 1237 Format: "hcl", 1238 Data: ` 1239 data_dir = "` + a.Config.DataDir + `" 1240 node_id = "` + string(a.Config.NodeID) + `" 1241 node_name = "` + a.Config.NodeName + `" 1242 1243 acl_enforce_version_8 = false 1244 services = [ 1245 { 1246 name = "redis-reloaded" 1247 } 1248 ] 1249 limits = { 1250 rpc_rate=2 1251 rpc_max_burst=200 1252 } 1253 `, 1254 }) 1255 1256 if err := a.ReloadConfig(cfg2); err != nil { 1257 t.Fatalf("got error %v want nil", err) 1258 } 1259 if a.State.Service("redis-reloaded") == nil { 1260 t.Fatal("missing redis-reloaded service") 1261 } 1262 1263 if a.config.RPCRateLimit != 2 { 1264 t.Fatalf("RPC rate not set correctly. Got %v. Want 2", a.config.RPCRateLimit) 1265 } 1266 1267 if a.config.RPCMaxBurst != 200 { 1268 t.Fatalf("RPC max burst not set correctly. Got %v. 
Want 200", a.config.RPCMaxBurst) 1269 } 1270 1271 for _, wp := range a.watchPlans { 1272 if !wp.IsStopped() { 1273 t.Fatalf("Reloading configs should stop watch plans of the previous configuration") 1274 } 1275 } 1276 } 1277 1278 func TestAgent_Reload_ACLDeny(t *testing.T) { 1279 t.Parallel() 1280 a := NewTestAgent(t, t.Name(), TestACLConfig()) 1281 defer a.Shutdown() 1282 1283 testrpc.WaitForLeader(t, a.RPC, "dc1") 1284 t.Run("no token", func(t *testing.T) { 1285 req, _ := http.NewRequest("PUT", "/v1/agent/reload", nil) 1286 if _, err := a.srv.AgentReload(nil, req); !acl.IsErrPermissionDenied(err) { 1287 t.Fatalf("err: %v", err) 1288 } 1289 }) 1290 1291 t.Run("read-only token", func(t *testing.T) { 1292 ro := makeReadOnlyAgentACL(t, a.srv) 1293 req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/reload?token=%s", ro), nil) 1294 if _, err := a.srv.AgentReload(nil, req); !acl.IsErrPermissionDenied(err) { 1295 t.Fatalf("err: %v", err) 1296 } 1297 }) 1298 1299 // This proves we call the ACL function, and we've got the other reload 1300 // test to prove we do the reload, which should be sufficient. 1301 // The reload logic is a little complex to set up so isn't worth 1302 // repeating again here. 
1303 } 1304 1305 func TestAgent_Members(t *testing.T) { 1306 t.Parallel() 1307 a := NewTestAgent(t, t.Name(), "") 1308 defer a.Shutdown() 1309 1310 testrpc.WaitForTestAgent(t, a.RPC, "dc1") 1311 req, _ := http.NewRequest("GET", "/v1/agent/members", nil) 1312 obj, err := a.srv.AgentMembers(nil, req) 1313 if err != nil { 1314 t.Fatalf("Err: %v", err) 1315 } 1316 val := obj.([]serf.Member) 1317 if len(val) == 0 { 1318 t.Fatalf("bad members: %v", obj) 1319 } 1320 1321 if int(val[0].Port) != a.Config.SerfPortLAN { 1322 t.Fatalf("not lan: %v", obj) 1323 } 1324 } 1325 1326 func TestAgent_Members_WAN(t *testing.T) { 1327 t.Parallel() 1328 a := NewTestAgent(t, t.Name(), "") 1329 defer a.Shutdown() 1330 1331 testrpc.WaitForTestAgent(t, a.RPC, "dc1") 1332 req, _ := http.NewRequest("GET", "/v1/agent/members?wan=true", nil) 1333 obj, err := a.srv.AgentMembers(nil, req) 1334 if err != nil { 1335 t.Fatalf("Err: %v", err) 1336 } 1337 val := obj.([]serf.Member) 1338 if len(val) == 0 { 1339 t.Fatalf("bad members: %v", obj) 1340 } 1341 1342 if int(val[0].Port) != a.Config.SerfPortWAN { 1343 t.Fatalf("not wan: %v", obj) 1344 } 1345 } 1346 1347 func TestAgent_Members_ACLFilter(t *testing.T) { 1348 t.Parallel() 1349 a := NewTestAgent(t, t.Name(), TestACLConfig()) 1350 defer a.Shutdown() 1351 1352 testrpc.WaitForLeader(t, a.RPC, "dc1") 1353 t.Run("no token", func(t *testing.T) { 1354 req, _ := http.NewRequest("GET", "/v1/agent/members", nil) 1355 obj, err := a.srv.AgentMembers(nil, req) 1356 if err != nil { 1357 t.Fatalf("Err: %v", err) 1358 } 1359 val := obj.([]serf.Member) 1360 if len(val) != 0 { 1361 t.Fatalf("bad members: %v", obj) 1362 } 1363 }) 1364 1365 t.Run("root token", func(t *testing.T) { 1366 req, _ := http.NewRequest("GET", "/v1/agent/members?token=root", nil) 1367 obj, err := a.srv.AgentMembers(nil, req) 1368 if err != nil { 1369 t.Fatalf("Err: %v", err) 1370 } 1371 val := obj.([]serf.Member) 1372 if len(val) != 1 { 1373 t.Fatalf("bad members: %v", obj) 1374 } 1375 }) 1376 
} 1377 1378 func TestAgent_Join(t *testing.T) { 1379 t.Parallel() 1380 a1 := NewTestAgent(t, t.Name(), "") 1381 defer a1.Shutdown() 1382 a2 := NewTestAgent(t, t.Name(), "") 1383 defer a2.Shutdown() 1384 testrpc.WaitForLeader(t, a1.RPC, "dc1") 1385 testrpc.WaitForLeader(t, a2.RPC, "dc1") 1386 1387 addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN) 1388 req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s", addr), nil) 1389 obj, err := a1.srv.AgentJoin(nil, req) 1390 if err != nil { 1391 t.Fatalf("Err: %v", err) 1392 } 1393 if obj != nil { 1394 t.Fatalf("Err: %v", obj) 1395 } 1396 1397 if len(a1.LANMembers()) != 2 { 1398 t.Fatalf("should have 2 members") 1399 } 1400 1401 retry.Run(t, func(r *retry.R) { 1402 if got, want := len(a2.LANMembers()), 2; got != want { 1403 r.Fatalf("got %d LAN members want %d", got, want) 1404 } 1405 }) 1406 } 1407 1408 func TestAgent_Join_WAN(t *testing.T) { 1409 t.Parallel() 1410 a1 := NewTestAgent(t, t.Name(), "") 1411 defer a1.Shutdown() 1412 a2 := NewTestAgent(t, t.Name(), "") 1413 defer a2.Shutdown() 1414 testrpc.WaitForLeader(t, a1.RPC, "dc1") 1415 testrpc.WaitForLeader(t, a2.RPC, "dc1") 1416 1417 addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortWAN) 1418 req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s?wan=true", addr), nil) 1419 obj, err := a1.srv.AgentJoin(nil, req) 1420 if err != nil { 1421 t.Fatalf("Err: %v", err) 1422 } 1423 if obj != nil { 1424 t.Fatalf("Err: %v", obj) 1425 } 1426 1427 if len(a1.WANMembers()) != 2 { 1428 t.Fatalf("should have 2 members") 1429 } 1430 1431 retry.Run(t, func(r *retry.R) { 1432 if got, want := len(a2.WANMembers()), 2; got != want { 1433 r.Fatalf("got %d WAN members want %d", got, want) 1434 } 1435 }) 1436 } 1437 1438 func TestAgent_Join_ACLDeny(t *testing.T) { 1439 t.Parallel() 1440 a1 := NewTestAgent(t, t.Name(), TestACLConfig()) 1441 defer a1.Shutdown() 1442 a2 := NewTestAgent(t, t.Name(), "") 1443 defer a2.Shutdown() 1444 testrpc.WaitForLeader(t, a1.RPC, 
"dc1") 1445 testrpc.WaitForLeader(t, a2.RPC, "dc1") 1446 1447 addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN) 1448 1449 t.Run("no token", func(t *testing.T) { 1450 req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s", addr), nil) 1451 if _, err := a1.srv.AgentJoin(nil, req); !acl.IsErrPermissionDenied(err) { 1452 t.Fatalf("err: %v", err) 1453 } 1454 }) 1455 1456 t.Run("agent master token", func(t *testing.T) { 1457 req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s?token=towel", addr), nil) 1458 _, err := a1.srv.AgentJoin(nil, req) 1459 if err != nil { 1460 t.Fatalf("err: %v", err) 1461 } 1462 }) 1463 1464 t.Run("read-only token", func(t *testing.T) { 1465 ro := makeReadOnlyAgentACL(t, a1.srv) 1466 req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s?token=%s", addr, ro), nil) 1467 if _, err := a1.srv.AgentJoin(nil, req); !acl.IsErrPermissionDenied(err) { 1468 t.Fatalf("err: %v", err) 1469 } 1470 }) 1471 } 1472 1473 type mockNotifier struct{ s string } 1474 1475 func (n *mockNotifier) Notify(state string) error { 1476 n.s = state 1477 return nil 1478 } 1479 1480 func TestAgent_JoinLANNotify(t *testing.T) { 1481 t.Parallel() 1482 a1 := NewTestAgent(t, t.Name(), "") 1483 defer a1.Shutdown() 1484 testrpc.WaitForLeader(t, a1.RPC, "dc1") 1485 1486 a2 := NewTestAgent(t, t.Name(), ` 1487 server = false 1488 bootstrap = false 1489 `) 1490 defer a2.Shutdown() 1491 1492 notif := &mockNotifier{} 1493 a1.joinLANNotifier = notif 1494 1495 addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN) 1496 _, err := a1.JoinLAN([]string{addr}) 1497 if err != nil { 1498 t.Fatalf("err: %v", err) 1499 } 1500 1501 if got, want := notif.s, "READY=1"; got != want { 1502 t.Fatalf("got joinLAN notification %q want %q", got, want) 1503 } 1504 } 1505 1506 func TestAgent_Leave(t *testing.T) { 1507 t.Parallel() 1508 a1 := NewTestAgent(t, t.Name(), "") 1509 defer a1.Shutdown() 1510 testrpc.WaitForLeader(t, a1.RPC, "dc1") 1511 1512 a2 := 
NewTestAgent(t, t.Name(), ` 1513 server = false 1514 bootstrap = false 1515 `) 1516 defer a2.Shutdown() 1517 1518 // Join first 1519 addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN) 1520 _, err := a1.JoinLAN([]string{addr}) 1521 if err != nil { 1522 t.Fatalf("err: %v", err) 1523 } 1524 1525 // Graceful leave now 1526 req, _ := http.NewRequest("PUT", "/v1/agent/leave", nil) 1527 obj, err := a2.srv.AgentLeave(nil, req) 1528 if err != nil { 1529 t.Fatalf("Err: %v", err) 1530 } 1531 if obj != nil { 1532 t.Fatalf("Err: %v", obj) 1533 } 1534 retry.Run(t, func(r *retry.R) { 1535 m := a1.LANMembers() 1536 if got, want := m[1].Status, serf.StatusLeft; got != want { 1537 r.Fatalf("got status %q want %q", got, want) 1538 } 1539 }) 1540 } 1541 1542 func TestAgent_Leave_ACLDeny(t *testing.T) { 1543 t.Parallel() 1544 a := NewTestAgent(t, t.Name(), TestACLConfig()) 1545 defer a.Shutdown() 1546 testrpc.WaitForLeader(t, a.RPC, "dc1") 1547 1548 t.Run("no token", func(t *testing.T) { 1549 req, _ := http.NewRequest("PUT", "/v1/agent/leave", nil) 1550 if _, err := a.srv.AgentLeave(nil, req); !acl.IsErrPermissionDenied(err) { 1551 t.Fatalf("err: %v", err) 1552 } 1553 }) 1554 1555 t.Run("read-only token", func(t *testing.T) { 1556 ro := makeReadOnlyAgentACL(t, a.srv) 1557 req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/leave?token=%s", ro), nil) 1558 if _, err := a.srv.AgentLeave(nil, req); !acl.IsErrPermissionDenied(err) { 1559 t.Fatalf("err: %v", err) 1560 } 1561 }) 1562 1563 // this sub-test will change the state so that there is no leader. 1564 // it must therefore be the last one in this list. 
1565 t.Run("agent master token", func(t *testing.T) { 1566 req, _ := http.NewRequest("PUT", "/v1/agent/leave?token=towel", nil) 1567 if _, err := a.srv.AgentLeave(nil, req); err != nil { 1568 t.Fatalf("err: %v", err) 1569 } 1570 }) 1571 } 1572 1573 func TestAgent_ForceLeave(t *testing.T) { 1574 t.Parallel() 1575 a1 := NewTestAgent(t, t.Name(), "") 1576 defer a1.Shutdown() 1577 a2 := NewTestAgent(t, t.Name(), "") 1578 testrpc.WaitForLeader(t, a1.RPC, "dc1") 1579 testrpc.WaitForLeader(t, a2.RPC, "dc1") 1580 1581 // Join first 1582 addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN) 1583 _, err := a1.JoinLAN([]string{addr}) 1584 if err != nil { 1585 t.Fatalf("err: %v", err) 1586 } 1587 1588 // this test probably needs work 1589 a2.Shutdown() 1590 // Wait for agent being marked as failed, so we wait for full shutdown of Agent 1591 retry.Run(t, func(r *retry.R) { 1592 m := a1.LANMembers() 1593 if got, want := m[1].Status, serf.StatusFailed; got != want { 1594 r.Fatalf("got status %q want %q", got, want) 1595 } 1596 }) 1597 1598 // Force leave now 1599 req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/force-leave/%s", a2.Config.NodeName), nil) 1600 obj, err := a1.srv.AgentForceLeave(nil, req) 1601 if err != nil { 1602 t.Fatalf("Err: %v", err) 1603 } 1604 if obj != nil { 1605 t.Fatalf("Err: %v", obj) 1606 } 1607 retry.Run(t, func(r *retry.R) { 1608 m := a1.LANMembers() 1609 if got, want := m[1].Status, serf.StatusLeft; got != want { 1610 r.Fatalf("got status %q want %q", got, want) 1611 } 1612 }) 1613 1614 } 1615 1616 func TestAgent_ForceLeave_ACLDeny(t *testing.T) { 1617 t.Parallel() 1618 a := NewTestAgent(t, t.Name(), TestACLConfig()) 1619 defer a.Shutdown() 1620 testrpc.WaitForLeader(t, a.RPC, "dc1") 1621 1622 t.Run("no token", func(t *testing.T) { 1623 req, _ := http.NewRequest("PUT", "/v1/agent/force-leave/nope", nil) 1624 if _, err := a.srv.AgentForceLeave(nil, req); !acl.IsErrPermissionDenied(err) { 1625 t.Fatalf("err: %v", err) 1626 } 1627 }) 1628 
1629 t.Run("agent master token", func(t *testing.T) { 1630 req, _ := http.NewRequest("PUT", "/v1/agent/force-leave/nope?token=towel", nil) 1631 if _, err := a.srv.AgentForceLeave(nil, req); err != nil { 1632 t.Fatalf("err: %v", err) 1633 } 1634 }) 1635 1636 t.Run("read-only token", func(t *testing.T) { 1637 ro := makeReadOnlyAgentACL(t, a.srv) 1638 req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/force-leave/nope?token=%s", ro), nil) 1639 if _, err := a.srv.AgentForceLeave(nil, req); !acl.IsErrPermissionDenied(err) { 1640 t.Fatalf("err: %v", err) 1641 } 1642 }) 1643 } 1644 1645 func TestAgent_RegisterCheck(t *testing.T) { 1646 t.Parallel() 1647 a := NewTestAgent(t, t.Name(), "") 1648 defer a.Shutdown() 1649 testrpc.WaitForTestAgent(t, a.RPC, "dc1") 1650 1651 args := &structs.CheckDefinition{ 1652 Name: "test", 1653 TTL: 15 * time.Second, 1654 } 1655 req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token=abc123", jsonReader(args)) 1656 obj, err := a.srv.AgentRegisterCheck(nil, req) 1657 if err != nil { 1658 t.Fatalf("err: %v", err) 1659 } 1660 if obj != nil { 1661 t.Fatalf("bad: %v", obj) 1662 } 1663 1664 // Ensure we have a check mapping 1665 checkID := types.CheckID("test") 1666 if _, ok := a.State.Checks()[checkID]; !ok { 1667 t.Fatalf("missing test check") 1668 } 1669 1670 if _, ok := a.checkTTLs[checkID]; !ok { 1671 t.Fatalf("missing test check ttl") 1672 } 1673 1674 // Ensure the token was configured 1675 if token := a.State.CheckToken(checkID); token == "" { 1676 t.Fatalf("missing token") 1677 } 1678 1679 // By default, checks start in critical state. 1680 state := a.State.Checks()[checkID] 1681 if state.Status != api.HealthCritical { 1682 t.Fatalf("bad: %v", state) 1683 } 1684 } 1685 1686 // This verifies all the forms of the new args-style check that we need to 1687 // support as a result of https://github.com/hashicorp/consul/issues/3587. 
func TestAgent_RegisterCheck_Scripts(t *testing.T) {
	t.Parallel()
	// Script checks are disabled by default; enable them so all three
	// argument spellings below are accepted.
	a := NewTestAgent(t, t.Name(), `
		enable_script_checks = true
	`)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// The same script check expressed with each historical field name:
	// the pre-1.0 "ScriptArgs", the snake_case "script_args" fixup, and
	// the current "Args" form.
	tests := []struct {
		name  string
		check map[string]interface{}
	}{
		{
			"== Consul 1.0.0",
			map[string]interface{}{
				"Name":       "test",
				"Interval":   "2s",
				"ScriptArgs": []string{"true"},
			},
		},
		{
			"> Consul 1.0.0 (fixup)",
			map[string]interface{}{
				"Name":        "test",
				"Interval":    "2s",
				"script_args": []string{"true"},
			},
		},
		{
			"> Consul 1.0.0",
			map[string]interface{}{
				"Name":     "test",
				"Interval": "2s",
				"Args":     []string{"true"},
			},
		},
	}
	for _, tt := range tests {
		// Register the check directly as a node-level check.
		t.Run(tt.name+" as node check", func(t *testing.T) {
			req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(tt.check))
			resp := httptest.NewRecorder()
			if _, err := a.srv.AgentRegisterCheck(resp, req); err != nil {
				t.Fatalf("err: %v", err)
			}
			if resp.Code != http.StatusOK {
				t.Fatalf("bad: %d", resp.Code)
			}
		})

		// Register it as the single top-level "Check" of a service
		// definition.
		t.Run(tt.name+" as top-level service check", func(t *testing.T) {
			args := map[string]interface{}{
				"Name":  "a",
				"Port":  1234,
				"Check": tt.check,
			}

			req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
			resp := httptest.NewRecorder()
			if _, err := a.srv.AgentRegisterService(resp, req); err != nil {
				t.Fatalf("err: %v", err)
			}
			if resp.Code != http.StatusOK {
				t.Fatalf("bad: %d", resp.Code)
			}
		})

		// Register it inside the "Checks" slice of a service definition.
		t.Run(tt.name+" as slice-based service check", func(t *testing.T) {
			args := map[string]interface{}{
				"Name":   "a",
				"Port":   1234,
				"Checks": []map[string]interface{}{tt.check},
			}

			req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
			resp := httptest.NewRecorder()
			if _, err := a.srv.AgentRegisterService(resp, req); err != nil {
				t.Fatalf("err: %v", err)
			}
			if resp.Code != http.StatusOK {
				t.Fatalf("bad: %d", resp.Code)
			}
		})
	}
}

// TestAgent_RegisterCheckScriptsExecDisable verifies that registering a
// script check over HTTP is rejected when script checks are disabled,
// which is the default agent configuration.
func TestAgent_RegisterCheckScriptsExecDisable(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	args := &structs.CheckDefinition{
		Name:       "test",
		ScriptArgs: []string{"true"},
		Interval:   time.Second,
	}
	req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token=abc123", jsonReader(args))
	res := httptest.NewRecorder()
	_, err := a.srv.AgentRegisterCheck(res, req)
	if err == nil {
		t.Fatalf("expected error but got nil")
	}
	if !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
		t.Fatalf("expected script disabled error, got: %s", err)
	}
	// The rejected check must not have been added to local state.
	checkID := types.CheckID("test")
	if _, ok := a.State.Checks()[checkID]; ok {
		t.Fatalf("check registered with exec disable")
	}
}

// TestAgent_RegisterCheckScriptsExecRemoteDisable verifies that a script
// check arriving via the HTTP API (a remote source) is still rejected when
// only local script checks are enabled.
func TestAgent_RegisterCheckScriptsExecRemoteDisable(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		enable_local_script_checks = true
	`)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	args := &structs.CheckDefinition{
		Name:       "test",
		ScriptArgs: []string{"true"},
		Interval:   time.Second,
	}
	req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token=abc123", jsonReader(args))
	res := httptest.NewRecorder()
	_, err := a.srv.AgentRegisterCheck(res, req)
	if err == nil {
		t.Fatalf("expected error but got nil")
	}
	if !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
		t.Fatalf("expected script disabled error, got: %s", err)
	}
	// The rejected check must not have been added to local state.
	checkID := types.CheckID("test")
	if _, ok := a.State.Checks()[checkID]; ok {
		t.Fatalf("check registered with exec disable")
	}
}

// TestAgent_RegisterCheck_Passing verifies that a check registered with an
// explicit passing status starts out passing instead of the default
// critical state.
func TestAgent_RegisterCheck_Passing(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	args := &structs.CheckDefinition{
		Name:   "test",
		TTL:    15 * time.Second,
		Status: api.HealthPassing,
	}
	req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(args))
	obj, err := a.srv.AgentRegisterCheck(nil, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if obj != nil {
		t.Fatalf("bad: %v", obj)
	}

	// Ensure we have a check mapping
	checkID := types.CheckID("test")
	if _, ok := a.State.Checks()[checkID]; !ok {
		t.Fatalf("missing test check")
	}

	if _, ok := a.checkTTLs[checkID]; !ok {
		t.Fatalf("missing test check ttl")
	}

	// The check should have started in the requested passing state.
	state := a.State.Checks()[checkID]
	if state.Status != api.HealthPassing {
		t.Fatalf("bad: %v", state)
	}
}

// TestAgent_RegisterCheck_BadStatus verifies that an unknown initial check
// status is rejected with an HTTP 400.
func TestAgent_RegisterCheck_BadStatus(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	args := &structs.CheckDefinition{
		Name:   "test",
		TTL:    15 * time.Second,
		Status: "fluffy", // not a valid health state
	}
	req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(args))
	resp := httptest.NewRecorder()
	if _, err := a.srv.AgentRegisterCheck(resp, req); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Code != 400 {
		t.Fatalf("accepted bad status")
	}
}

// TestAgent_RegisterCheck_ACLDeny verifies ACL enforcement on check
// registration: a tokenless request is denied, the root token succeeds.
func TestAgent_RegisterCheck_ACLDeny(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), TestACLConfig())
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	args := &structs.CheckDefinition{
		Name: "test",
		TTL:  15 * time.Second,
	}

	t.Run("no token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(args))
		if _, err := a.srv.AgentRegisterCheck(nil, req); !acl.IsErrPermissionDenied(err) {
			t.Fatalf("err: %v", err)
		}
	})

	t.Run("root token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token=root", jsonReader(args))
		if _, err := a.srv.AgentRegisterCheck(nil, req); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
}

// TestAgent_DeregisterCheck verifies that deregistering an existing check
// removes it from the agent's local state.
func TestAgent_DeregisterCheck(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
	if err := a.AddCheck(chk, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	req, _ := http.NewRequest("PUT", "/v1/agent/check/deregister/test", nil)
	obj, err := a.srv.AgentDeregisterCheck(nil, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if obj != nil {
		t.Fatalf("bad: %v", obj)
	}

	// Ensure we have a check mapping
	if _, ok := a.State.Checks()["test"]; ok {
		t.Fatalf("have test check")
	}
}

// TestAgent_DeregisterCheckACLDeny verifies ACL enforcement on check
// deregistration: a tokenless request is denied, the root token succeeds.
func TestAgent_DeregisterCheckACLDeny(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), TestACLConfig())
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
	if err := a.AddCheck(chk, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	t.Run("no token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/check/deregister/test", nil)
		if _, err := a.srv.AgentDeregisterCheck(nil, req); !acl.IsErrPermissionDenied(err) {
			t.Fatalf("err: %v", err)
		}
	})

	t.Run("root token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/check/deregister/test?token=root", nil)
		if _, err := a.srv.AgentDeregisterCheck(nil, req); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
}

// TestAgent_PassCheck verifies that the pass endpoint moves a TTL check
// into the passing state.
func TestAgent_PassCheck(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
	chkType := &structs.CheckType{TTL: 15 * time.Second}
	if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	req, _ := http.NewRequest("PUT", "/v1/agent/check/pass/test", nil)
	obj, err := a.srv.AgentCheckPass(nil, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if obj != nil {
		t.Fatalf("bad: %v", obj)
	}

	// Ensure we have a check mapping
	state := a.State.Checks()["test"]
	if state.Status != api.HealthPassing {
		t.Fatalf("bad: %v", state)
	}
}

// TestAgent_PassCheck_ACLDeny verifies ACL enforcement on the check pass
// endpoint: a tokenless request is denied, the root token succeeds.
func TestAgent_PassCheck_ACLDeny(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), TestACLConfig())
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
	chkType := &structs.CheckType{TTL: 15 * time.Second}
	if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	t.Run("no token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/check/pass/test", nil)
		if _, err := a.srv.AgentCheckPass(nil, req); !acl.IsErrPermissionDenied(err) {
			t.Fatalf("err: %v", err)
		}
	})

	t.Run("root token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/check/pass/test?token=root", nil)
		if _, err := a.srv.AgentCheckPass(nil, req); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
}
// TestAgent_WarnCheck verifies that PUT /v1/agent/check/warn/<id> transitions
// a registered TTL check into the warning state in the agent's local state.
func TestAgent_WarnCheck(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register a TTL check to flip into warning.
	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
	chkType := &structs.CheckType{TTL: 15 * time.Second}
	if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	req, _ := http.NewRequest("PUT", "/v1/agent/check/warn/test", nil)
	obj, err := a.srv.AgentCheckWarn(nil, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// The endpoint returns no body on success.
	if obj != nil {
		t.Fatalf("bad: %v", obj)
	}

	// Ensure we have a check mapping
	state := a.State.Checks()["test"]
	if state.Status != api.HealthWarning {
		t.Fatalf("bad: %v", state)
	}
}

// TestAgent_WarnCheck_ACLDeny verifies that the check-warn endpoint is denied
// without a token and allowed with the root token when ACLs are enabled.
func TestAgent_WarnCheck_ACLDeny(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), TestACLConfig())
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
	chkType := &structs.CheckType{TTL: 15 * time.Second}
	if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	t.Run("no token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/check/warn/test", nil)
		if _, err := a.srv.AgentCheckWarn(nil, req); !acl.IsErrPermissionDenied(err) {
			t.Fatalf("err: %v", err)
		}
	})

	t.Run("root token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/check/warn/test?token=root", nil)
		if _, err := a.srv.AgentCheckWarn(nil, req); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
}

// TestAgent_FailCheck verifies that PUT /v1/agent/check/fail/<id> transitions
// a registered TTL check into the critical state in the agent's local state.
func TestAgent_FailCheck(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Register a TTL check to flip into critical.
	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
	chkType := &structs.CheckType{TTL: 15 * time.Second}
	if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	req, _ := http.NewRequest("PUT", "/v1/agent/check/fail/test", nil)
	obj, err := a.srv.AgentCheckFail(nil, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// The endpoint returns no body on success.
	if obj != nil {
		t.Fatalf("bad: %v", obj)
	}

	// Ensure we have a check mapping
	state := a.State.Checks()["test"]
	if state.Status != api.HealthCritical {
		t.Fatalf("bad: %v", state)
	}
}

// TestAgent_FailCheck_ACLDeny verifies that the check-fail endpoint is denied
// without a token and allowed with the root token when ACLs are enabled.
func TestAgent_FailCheck_ACLDeny(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), TestACLConfig())
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
	chkType := &structs.CheckType{TTL: 15 * time.Second}
	if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	t.Run("no token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/check/fail/test", nil)
		if _, err := a.srv.AgentCheckFail(nil, req); !acl.IsErrPermissionDenied(err) {
			t.Fatalf("err: %v", err)
		}
	})

	t.Run("root token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/check/fail/test?token=root", nil)
		if _, err := a.srv.AgentCheckFail(nil, req); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
}

// TestAgent_UpdateCheck verifies PUT /v1/agent/check/update/<id>: setting each
// supported status with output, truncation of oversized output, and rejection
// of unknown status values with a 400.
func TestAgent_UpdateCheck(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
	chkType := &structs.CheckType{TTL: 15 * time.Second}
	if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// One case per supported status; each should land in local state verbatim.
	cases := []checkUpdate{
		checkUpdate{api.HealthPassing, "hello-passing"},
		checkUpdate{api.HealthCritical, "hello-critical"},
		checkUpdate{api.HealthWarning, "hello-warning"},
	}

	for _, c := range cases {
		t.Run(c.Status, func(t *testing.T) {
			req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(c))
			resp := httptest.NewRecorder()
			obj, err := a.srv.AgentCheckUpdate(resp, req)
			if err != nil {
				t.Fatalf("err: %v", err)
			}
			if obj != nil {
				t.Fatalf("bad: %v", obj)
			}
			if resp.Code != 200 {
				t.Fatalf("expected 200, got %d", resp.Code)
			}

			state := a.State.Checks()["test"]
			if state.Status != c.Status || state.Output != c.Output {
				t.Fatalf("bad: %v", state)
			}
		})
	}

	t.Run("log output limit", func(t *testing.T) {
		// Submit output far beyond the buffer size to exercise truncation.
		args := checkUpdate{
			Status: api.HealthPassing,
			Output: strings.Repeat("-= bad -=", 5*checks.BufSize),
		}
		req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(args))
		resp := httptest.NewRecorder()
		obj, err := a.srv.AgentCheckUpdate(resp, req)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		if obj != nil {
			t.Fatalf("bad: %v", obj)
		}
		if resp.Code != 200 {
			t.Fatalf("expected 200, got %d", resp.Code)
		}

		// Since we append some notes about truncating, we just do a
		// rough check that the output buffer was cut down so this test
		// isn't super brittle.
		state := a.State.Checks()["test"]
		if state.Status != api.HealthPassing || len(state.Output) > 2*checks.BufSize {
			t.Fatalf("bad: %v", state)
		}
	})

	t.Run("bogus status", func(t *testing.T) {
		// An unrecognized status must be rejected with a 400, not an error.
		args := checkUpdate{Status: "itscomplicated"}
		req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(args))
		resp := httptest.NewRecorder()
		obj, err := a.srv.AgentCheckUpdate(resp, req)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		if obj != nil {
			t.Fatalf("bad: %v", obj)
		}
		if resp.Code != 400 {
			t.Fatalf("expected 400, got %d", resp.Code)
		}
	})
}

// TestAgent_UpdateCheck_ACLDeny verifies that the check-update endpoint is
// denied without a token and allowed with the root token when ACLs are enabled.
func TestAgent_UpdateCheck_ACLDeny(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), TestACLConfig())
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
	chkType := &structs.CheckType{TTL: 15 * time.Second}
	if err := a.AddCheck(chk, chkType, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	t.Run("no token", func(t *testing.T) {
		args := checkUpdate{api.HealthPassing, "hello-passing"}
		req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(args))
		if _, err := a.srv.AgentCheckUpdate(nil, req); !acl.IsErrPermissionDenied(err) {
			t.Fatalf("err: %v", err)
		}
	})

	t.Run("root token", func(t *testing.T) {
		args := checkUpdate{api.HealthPassing, "hello-passing"}
		req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test?token=root", jsonReader(args))
		if _, err := a.srv.AgentCheckUpdate(nil, req); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
}

// TestAgent_RegisterService verifies that PUT /v1/agent/service/register
// stores the service (with meta and weights), registers all of its checks as
// TTL checks, and records the request token against the service.
func TestAgent_RegisterService(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// One singular Check plus two entries in Checks: three TTL checks total.
	args := &structs.ServiceDefinition{
		Name: "test",
		Meta: map[string]string{"hello": "world"},
		Tags: []string{"master"},
		Port: 8000,
		Check: structs.CheckType{
			TTL: 15 * time.Second,
		},
		Checks: []*structs.CheckType{
			&structs.CheckType{
				TTL: 20 * time.Second,
			},
			&structs.CheckType{
				TTL: 30 * time.Second,
			},
		},
		Weights: &structs.Weights{
			Passing: 100,
			Warning: 3,
		},
	}
	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))

	obj, err := a.srv.AgentRegisterService(nil, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if obj != nil {
		t.Fatalf("bad: %v", obj)
	}

	// Ensure the service
	if _, ok := a.State.Services()["test"]; !ok {
		t.Fatalf("missing test service")
	}
	if val := a.State.Service("test").Meta["hello"]; val != "world" {
		t.Fatalf("Missing meta: %v", a.State.Service("test").Meta)
	}
	if val := a.State.Service("test").Weights.Passing; val != 100 {
		t.Fatalf("Expected 100 for Weights.Passing, got: %v", val)
	}
	if val := a.State.Service("test").Weights.Warning; val != 3 {
		t.Fatalf("Expected 3 for Weights.Warning, got: %v", val)
	}

	// Ensure we have a check mapping
	checks := a.State.Checks()
	if len(checks) != 3 {
		t.Fatalf("bad: %v", checks)
	}

	if len(a.checkTTLs) != 3 {
		t.Fatalf("missing test check ttls: %v", a.checkTTLs)
	}

	// Ensure the token was configured
	if token := a.State.ServiceToken("test"); token == "" {
		t.Fatalf("missing token")
	}
}

// TestAgent_RegisterService_TranslateKeys verifies that snake_case keys in a
// service-registration payload are translated to their canonical (CamelCase)
// fields, case-insensitively, while "opaque" config/meta maps are passed
// through untranslated. It checks both the main service and the registered
// sidecar service.
func TestAgent_RegisterService_TranslateKeys(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		connect {
			proxy {
				allow_managed_api_registration = true
			}
		}
	`)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	json := `
	{
		"name":"test",
		"port":8000,
		"enable_tag_override": true,
		"meta": {
			"some": "meta",
			"enable_tag_override": "meta is 'opaque' so should not get translated"
		},
		"kind": "connect-proxy",` +
		// Note the uppercase P is important here - it ensures translation works
		// correctly in case-insensitive way. Without it this test can pass even
		// when translation is broken for other valid inputs.
		`"Proxy": {
			"destination_service_name": "web",
			"destination_service_id": "web",
			"local_service_port": 1234,
			"local_service_address": "127.0.0.1",
			"config": {
				"destination_type": "proxy.config is 'opaque' so should not get translated"
			},
			"upstreams": [
				{
					"destination_type": "service",
					"destination_namespace": "default",
					"destination_name": "db",
					"local_bind_address": "127.0.0.1",
					"local_bind_port": 1234,
					"config": {
						"destination_type": "proxy.upstreams.config is 'opaque' so should not get translated"
					}
				}
			]
		},
		"connect": {
			"proxy": {
				"exec_mode": "script",
				"config": {
					"destination_type": "connect.proxy.config is 'opaque' so should not get translated"
				},
				"upstreams": [
					{
						"destination_type": "service",
						"destination_namespace": "default",
						"destination_name": "db",
						"local_bind_address": "127.0.0.1",
						"local_bind_port": 1234,
						"config": {
							"destination_type": "connect.proxy.upstreams.config is 'opaque' so should not get translated"
						}
					}
				]
			},
			"sidecar_service": {
				"name":"test-proxy",
				"port":8001,
				"enable_tag_override": true,
				"meta": {
					"some": "meta",
					"enable_tag_override": "sidecar_service.meta is 'opaque' so should not get translated"
				},
				"kind": "connect-proxy",
				"proxy": {
					"destination_service_name": "test",
					"destination_service_id": "test",
					"local_service_port": 4321,
					"local_service_address": "127.0.0.1",
					"upstreams": [
						{
							"destination_type": "service",
							"destination_namespace": "default",
							"destination_name": "db",
							"local_bind_address": "127.0.0.1",
							"local_bind_port": 1234,
							"config": {
								"destination_type": "sidecar_service.proxy.upstreams.config is 'opaque' so should not get translated"
							}
						}
					]
				}
			}
		},
		"weights":{
			"passing": 16
		}
	}`
	req, _ := http.NewRequest("PUT", "/v1/agent/service/register", strings.NewReader(json))

	rr := httptest.NewRecorder()
	obj, err := a.srv.AgentRegisterService(rr, req)
	require.NoError(t, err)
	require.Nil(t, obj)
	require.Equal(t, 200, rr.Code, "body: %s", rr.Body)

	// Expected canonical representation of the main service after translation.
	svc := &structs.NodeService{
		ID:      "test",
		Service: "test",
		Meta: map[string]string{
			"some":                "meta",
			"enable_tag_override": "meta is 'opaque' so should not get translated",
		},
		Port:              8000,
		EnableTagOverride: true,
		Weights:           &structs.Weights{Passing: 16, Warning: 0},
		Kind:              structs.ServiceKindConnectProxy,
		Proxy: structs.ConnectProxyConfig{
			DestinationServiceName: "web",
			DestinationServiceID:   "web",
			LocalServiceAddress:    "127.0.0.1",
			LocalServicePort:       1234,
			Config: map[string]interface{}{
				"destination_type": "proxy.config is 'opaque' so should not get translated",
			},
			Upstreams: structs.Upstreams{
				{
					DestinationType:      structs.UpstreamDestTypeService,
					DestinationName:      "db",
					DestinationNamespace: "default",
					LocalBindAddress:     "127.0.0.1",
					LocalBindPort:        1234,
					Config: map[string]interface{}{
						"destination_type": "proxy.upstreams.config is 'opaque' so should not get translated",
					},
				},
			},
		},
		Connect: structs.ServiceConnect{
			Proxy: &structs.ServiceDefinitionConnectProxy{
				ExecMode: "script",
				Config: map[string]interface{}{
					"destination_type": "connect.proxy.config is 'opaque' so should not get translated",
				},
				Upstreams: structs.Upstreams{
					{
						DestinationType:      structs.UpstreamDestTypeService,
						DestinationName:      "db",
						DestinationNamespace: "default",
						LocalBindAddress:     "127.0.0.1",
						LocalBindPort:        1234,
						Config: map[string]interface{}{
							"destination_type": "connect.proxy.upstreams.config is 'opaque' so should not get translated",
						},
					},
				},
			},
			// The sidecar service is nilled since it is only config sugar and
			// shouldn't be represented in state. We assert that the translations
			// there worked by inspecting the registered sidecar below.
			SidecarService: nil,
		},
	}

	got := a.State.Service("test")
	require.Equal(t, svc, got)

	// Expected canonical representation of the registered sidecar service.
	sidecarSvc := &structs.NodeService{
		Kind:    structs.ServiceKindConnectProxy,
		ID:      "test-sidecar-proxy",
		Service: "test-proxy",
		Meta: map[string]string{
			"some":                "meta",
			"enable_tag_override": "sidecar_service.meta is 'opaque' so should not get translated",
		},
		Port:                       8001,
		EnableTagOverride:          true,
		Weights:                    &structs.Weights{Passing: 1, Warning: 1},
		LocallyRegisteredAsSidecar: true,
		Proxy: structs.ConnectProxyConfig{
			DestinationServiceName: "test",
			DestinationServiceID:   "test",
			LocalServiceAddress:    "127.0.0.1",
			LocalServicePort:       4321,
			Upstreams: structs.Upstreams{
				{
					DestinationType:      structs.UpstreamDestTypeService,
					DestinationName:      "db",
					DestinationNamespace: "default",
					LocalBindAddress:     "127.0.0.1",
					LocalBindPort:        1234,
					Config: map[string]interface{}{
						"destination_type": "sidecar_service.proxy.upstreams.config is 'opaque' so should not get translated",
					},
				},
			},
		},
	}

	gotSidecar := a.State.Service("test-sidecar-proxy")
	require.Equal(t, sidecarSvc, gotSidecar)
}

// TestAgent_RegisterService_ACLDeny verifies that service registration is
// denied without a token and allowed with the root token when ACLs are enabled.
func TestAgent_RegisterService_ACLDeny(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), TestACLConfig())
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	args := &structs.ServiceDefinition{
		Name: "test",
		Tags: []string{"master"},
		Port: 8000,
		Check: structs.CheckType{
			TTL: 15 * time.Second,
		},
		Checks: []*structs.CheckType{
			&structs.CheckType{
				TTL: 20 * time.Second,
			},
			&structs.CheckType{
				TTL: 30 * time.Second,
			},
		},
	}

	t.Run("no token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
		if _, err := a.srv.AgentRegisterService(nil, req); !acl.IsErrPermissionDenied(err) {
			t.Fatalf("err: %v", err)
		}
	})

	t.Run("root token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(args))
		if _, err := a.srv.AgentRegisterService(nil, req); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
}

// TestAgent_RegisterService_InvalidAddress verifies that wildcard service
// addresses (0.0.0.0 and IPv6 equivalents) are rejected with a 400 and the
// expected error body.
func TestAgent_RegisterService_InvalidAddress(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	for _, addr := range []string{"0.0.0.0", "::", "[::]"} {
		t.Run("addr "+addr, func(t *testing.T) {
			args := &structs.ServiceDefinition{
				Name:    "test",
				Address: addr,
				Port:    8000,
			}
			req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))
			resp := httptest.NewRecorder()
			_, err := a.srv.AgentRegisterService(resp, req)
			if err != nil {
				t.Fatalf("got error %v want nil", err)
			}
			if got, want := resp.Code, 400; got != want {
				t.Fatalf("got code %d want %d", got, want)
			}
			if got, want := resp.Body.String(), "Invalid service address"; got != want {
				t.Fatalf("got body %q want %q", got, want)
			}
		})
	}
}

// This tests local agent service registration with a managed proxy.
func TestAgent_RegisterService_ManagedConnectProxy(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	require := require.New(t)
	// Managed-proxy API registration must be explicitly enabled in config.
	a := NewTestAgent(t, t.Name(), `
		connect {
			proxy {
				allow_managed_api_registration = true
			}
		}
	`)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register a proxy. Note that the destination doesn't exist here on
	// this agent or in the catalog at all. This is intended and part
	// of the design.
	args := &api.AgentServiceRegistration{
		Name: "web",
		Port: 8000,
		Connect: &api.AgentServiceConnect{
			Proxy: &api.AgentServiceConnectProxy{
				ExecMode: "script",
				Command:  []string{"proxy.sh"},
				Config: map[string]interface{}{
					"foo": "bar",
				},
				// Includes an upstream with missing defaulted type
				Upstreams: structs.TestUpstreams(t).ToAPI(),
			},
		},
	}

	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))
	resp := httptest.NewRecorder()
	obj, err := a.srv.AgentRegisterService(resp, req)
	assert.NoError(err)
	assert.Nil(obj)
	require.Equal(200, resp.Code, "request failed with body: %s",
		resp.Body.String())

	// Ensure the target service
	_, ok := a.State.Services()["web"]
	assert.True(ok, "has service")

	// Ensure the proxy service was registered
	proxySvc, ok := a.State.Services()["web-proxy"]
	require.True(ok, "has proxy service")
	assert.Equal(structs.ServiceKindConnectProxy, proxySvc.Kind)
	assert.Equal("web", proxySvc.Proxy.DestinationServiceName)
	assert.NotEmpty(proxySvc.Port, "a port should have been assigned")

	// Ensure proxy itself was registered
	proxy := a.State.Proxy("web-proxy")
	require.NotNil(proxy)
	assert.Equal(structs.ProxyExecModeScript, proxy.Proxy.ExecMode)
	assert.Equal([]string{"proxy.sh"}, proxy.Proxy.Command)
	assert.Equal(args.Connect.Proxy.Config, proxy.Proxy.Config)
	// Ensure the defaulted type is explicitly filled
	args.Connect.Proxy.Upstreams[0].DestinationType = api.UpstreamDestTypeService
	assert.Equal(args.Connect.Proxy.Upstreams,
		proxy.Proxy.Upstreams.ToAPI())

	// Ensure the token was configured
	assert.Equal("abc123", a.State.ServiceToken("web"))
	assert.Equal("abc123", a.State.ServiceToken("web-proxy"))
}

// This tests local agent service registration with a managed proxy using
// original deprecated upstreams syntax.
func TestAgent_RegisterService_ManagedConnectProxyDeprecated(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	require := require.New(t)
	// Managed-proxy API registration must be explicitly enabled in config.
	a := NewTestAgent(t, t.Name(), `
		connect {
			proxy {
				allow_managed_api_registration = true
			}
		}
	`)
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Register a proxy. Note that the destination doesn't exist here on
	// this agent or in the catalog at all. This is intended and part
	// of the design.
	args := &api.AgentServiceRegistration{
		Name: "web",
		Port: 8000,
		Connect: &api.AgentServiceConnect{
			Proxy: &api.AgentServiceConnectProxy{
				ExecMode: "script",
				Command:  []string{"proxy.sh"},
				Config: map[string]interface{}{
					"foo": "bar",
					// Deprecated location: upstreams embedded in Config rather
					// than the dedicated Upstreams field.
					"upstreams": []interface{}{
						map[string]interface{}{
							"destination_name": "db",
							"local_bind_port":  1234,
							// this was a field for old upstreams we don't support any more.
							// It should be copied into Upstreams' Config.
							"connect_timeout_ms": 1000,
						},
						map[string]interface{}{
							"destination_name": "geo-cache",
							"destination_type": "prepared_query",
							"local_bind_port":  1235,
						},
					},
				},
			},
		},
	}

	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))
	resp := httptest.NewRecorder()
	obj, err := a.srv.AgentRegisterService(resp, req)
	assert.NoError(err)
	assert.Nil(obj)
	require.Equal(200, resp.Code, "request failed with body: %s",
		resp.Body.String())

	// Ensure the target service
	_, ok := a.State.Services()["web"]
	assert.True(ok, "has service")

	// Ensure the proxy service was registered
	proxySvc, ok := a.State.Services()["web-proxy"]
	require.True(ok, "has proxy service")
	assert.Equal(structs.ServiceKindConnectProxy, proxySvc.Kind)
	assert.Equal("web", proxySvc.Proxy.DestinationServiceName)
	assert.NotEmpty(proxySvc.Port, "a port should have been assigned")

	// Ensure proxy itself was registered
	proxy := a.State.Proxy("web-proxy")
	require.NotNil(proxy)
	assert.Equal(structs.ProxyExecModeScript, proxy.Proxy.ExecMode)
	assert.Equal([]string{"proxy.sh"}, proxy.Proxy.Command)
	// Remove the upstreams from the args - we expect them not to show up in
	// response now since that moved.
	delete(args.Connect.Proxy.Config, "upstreams")
	assert.Equal(args.Connect.Proxy.Config, proxy.Proxy.Config)
	// The deprecated config-embedded upstreams should have been promoted into
	// real Upstreams, with unsupported fields moved into each upstream's Config.
	expectUpstreams := structs.Upstreams{
		{
			DestinationType: structs.UpstreamDestTypeService,
			DestinationName: "db",
			LocalBindPort:   1234,
			Config: map[string]interface{}{
				"connect_timeout_ms": float64(1000),
			},
		},
		{
			DestinationType: structs.UpstreamDestTypePreparedQuery,
			DestinationName: "geo-cache",
			LocalBindPort:   1235,
		},
	}
	assert.Equal(expectUpstreams, proxy.Proxy.Upstreams)

	// Ensure the token was configured
	assert.Equal("abc123", a.State.ServiceToken("web"))
	assert.Equal("abc123", a.State.ServiceToken("web-proxy"))
}

// This tests local agent service registration with a managed proxy with
// API registration disabled (default).
func TestAgent_RegisterService_ManagedConnectProxy_Disabled(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	// Note: no allow_managed_api_registration in config, so registration of a
	// managed proxy via the API must fail.
	a := NewTestAgent(t, t.Name(), ``)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register a proxy. Note that the destination doesn't exist here on
	// this agent or in the catalog at all. This is intended and part
	// of the design.
	args := &api.AgentServiceRegistration{
		Name: "web",
		Port: 8000,
		Connect: &api.AgentServiceConnect{
			Proxy: &api.AgentServiceConnectProxy{
				ExecMode: "script",
				Command:  []string{"proxy.sh"},
				Config: map[string]interface{}{
					"foo": "bar",
				},
			},
		},
	}

	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))
	resp := httptest.NewRecorder()
	_, err := a.srv.AgentRegisterService(resp, req)
	assert.Error(err)

	// Ensure the target service does not exist
	_, ok := a.State.Services()["web"]
	assert.False(ok, "does not has service")
}

// This tests local agent service registration of a unmanaged connect proxy.
// This verifies that it is put in the local state store properly for syncing
// later. Note that _managed_ connect proxies are registered as part of the
// target service's registration.
func TestAgent_RegisterService_UnmanagedConnectProxy(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register a proxy. Note that the destination doesn't exist here on this
	// agent or in the catalog at all. This is intended and part of the design.
	args := &api.AgentServiceRegistration{
		Kind: api.ServiceKindConnectProxy,
		Name: "connect-proxy",
		Port: 8000,
		// DEPRECATED (ProxyDestination) - remove this when removing ProxyDestination
		ProxyDestination: "bad_destination", // Deprecated, check it's overridden
		Proxy: &api.AgentServiceConnectProxyConfig{
			DestinationServiceName: "web",
			Upstreams: []api.Upstream{
				{
					// No type to force default
					DestinationName: "db",
					LocalBindPort:   1234,
				},
				{
					DestinationType: "prepared_query",
					DestinationName: "geo-cache",
					LocalBindPort:   1235,
				},
			},
		},
	}

	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))
	resp := httptest.NewRecorder()
	obj, err := a.srv.AgentRegisterService(resp, req)
	require.NoError(t, err)
	assert.Nil(obj)

	// Ensure the service
	svc, ok := a.State.Services()["connect-proxy"]
	assert.True(ok, "has service")
	assert.Equal(structs.ServiceKindConnectProxy, svc.Kind)
	// Registration must set that default type
	args.Proxy.Upstreams[0].DestinationType = api.UpstreamDestTypeService
	assert.Equal(args.Proxy, svc.Proxy.ToAPI())

	// Ensure the token was configured
	assert.Equal("abc123", a.State.ServiceToken("connect-proxy"))
}

// testDefaultSidecar builds the NodeService expected for a default sidecar
// proxy of the given service/port, applying any mutator funcs before
// returning. Port 2222 matches the constrained sidecar port range used by the
// sidecar tests below.
func testDefaultSidecar(svc string, port int, fns ...func(*structs.NodeService)) *structs.NodeService {
	ns := &structs.NodeService{
		ID:      svc + "-sidecar-proxy",
		Kind:    structs.ServiceKindConnectProxy,
		Service: svc + "-sidecar-proxy",
		Port:    2222,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
		// Note that LocallyRegisteredAsSidecar should be true on the internal
		// NodeService, but that we never want to see it in the HTTP response as
		// it's internal only state. This is being compared directly to local state
		// so should be present here.
		LocallyRegisteredAsSidecar: true,
		Proxy: structs.ConnectProxyConfig{
			DestinationServiceName: svc,
			DestinationServiceID:   svc,
			LocalServiceAddress:    "127.0.0.1",
			LocalServicePort:       port,
		},
	}
	for _, fn := range fns {
		fn(ns)
	}
	return ns
}

// testCreateToken creates a Policy for the provided rules and a Token linked to that Policy.
func testCreateToken(t *testing.T, a *TestAgent, rules string) string {
	policyName, err := uuid.GenerateUUID() // we just need a unique name for the test and UUIDs are definitely unique
	require.NoError(t, err)

	policyID := testCreatePolicy(t, a, policyName, rules)

	args := map[string]interface{}{
		"Name": "User Token",
		"Policies": []map[string]interface{}{
			map[string]interface{}{
				"ID": policyID,
			},
		},
		"Local": false,
	}
	req, _ := http.NewRequest("PUT", "/v1/acl/token?token=root", jsonReader(args))
	resp := httptest.NewRecorder()
	obj, err := a.srv.ACLTokenCreate(resp, req)
	require.NoError(t, err)
	require.NotNil(t, obj)
	aclResp := obj.(*structs.ACLToken)
	return aclResp.SecretID
}

// testCreatePolicy creates an ACL policy with the given name and rules via the
// HTTP API (using the root token) and returns its ID.
func testCreatePolicy(t *testing.T, a *TestAgent, name, rules string) string {
	args := map[string]interface{}{
		"Name":  name,
		"Rules": rules,
	}
	req, _ := http.NewRequest("PUT", "/v1/acl/policy?token=root", jsonReader(args))
	resp := httptest.NewRecorder()
	obj, err := a.srv.ACLPolicyCreate(resp, req)
	require.NoError(t, err)
	require.NotNil(t, obj)
	aclResp := obj.(*structs.ACLPolicy)
	return aclResp.ID
}

// This tests local agent service registration with a sidecar service. Note we
// only test simple defaults for the sidecar here since the actual logic for
// handling sidecar defaults and port assignment is tested thoroughly in
// TestAgent_sidecarServiceFromNodeService.
Note it also tests Deregister 2908 // explicitly too since setup is identical. 2909 func TestAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T) { 2910 t.Parallel() 2911 2912 tests := []struct { 2913 name string 2914 preRegister, preRegister2 *structs.NodeService 2915 // Use raw JSON payloads rather than encoding to avoid subtleties with some 2916 // internal representations and different ways they encode and decode. We 2917 // rely on the payload being Unmarshalable to structs.ServiceDefinition 2918 // directly. 2919 json string 2920 enableACL bool 2921 tokenRules string 2922 wantNS *structs.NodeService 2923 wantErr string 2924 wantSidecarIDLeftAfterDereg bool 2925 assertStateFn func(t *testing.T, state *local.State) 2926 }{ 2927 { 2928 name: "sanity check no sidecar case", 2929 json: ` 2930 { 2931 "name": "web", 2932 "port": 1111 2933 } 2934 `, 2935 wantNS: nil, 2936 wantErr: "", 2937 }, 2938 { 2939 name: "default sidecar", 2940 json: ` 2941 { 2942 "name": "web", 2943 "port": 1111, 2944 "connect": { 2945 "SidecarService": {} 2946 } 2947 } 2948 `, 2949 wantNS: testDefaultSidecar("web", 1111), 2950 wantErr: "", 2951 }, 2952 { 2953 name: "ACL OK defaults", 2954 json: ` 2955 { 2956 "name": "web", 2957 "port": 1111, 2958 "connect": { 2959 "SidecarService": {} 2960 } 2961 } 2962 `, 2963 enableACL: true, 2964 tokenRules: ` 2965 service "web-sidecar-proxy" { 2966 policy = "write" 2967 } 2968 service "web" { 2969 policy = "write" 2970 }`, 2971 wantNS: testDefaultSidecar("web", 1111), 2972 wantErr: "", 2973 }, 2974 { 2975 name: "ACL denied", 2976 json: ` 2977 { 2978 "name": "web", 2979 "port": 1111, 2980 "connect": { 2981 "SidecarService": {} 2982 } 2983 } 2984 `, 2985 enableACL: true, 2986 tokenRules: ``, // No token rules means no valid token 2987 wantNS: nil, 2988 wantErr: "Permission denied", 2989 }, 2990 { 2991 name: "ACL OK for service but not for sidecar", 2992 json: ` 2993 { 2994 "name": "web", 2995 "port": 1111, 2996 "connect": { 2997 "SidecarService": {} 
2998 } 2999 } 3000 `, 3001 enableACL: true, 3002 // This will become more common/reasonable when ACLs support exact match. 3003 tokenRules: ` 3004 service "web-sidecar-proxy" { 3005 policy = "deny" 3006 } 3007 service "web" { 3008 policy = "write" 3009 }`, 3010 wantNS: nil, 3011 wantErr: "Permission denied", 3012 }, 3013 { 3014 name: "ACL OK for service and sidecar but not sidecar's overridden destination", 3015 json: ` 3016 { 3017 "name": "web", 3018 "port": 1111, 3019 "connect": { 3020 "SidecarService": { 3021 "proxy": { 3022 "DestinationServiceName": "foo" 3023 } 3024 } 3025 } 3026 } 3027 `, 3028 enableACL: true, 3029 tokenRules: ` 3030 service "web-sidecar-proxy" { 3031 policy = "write" 3032 } 3033 service "web" { 3034 policy = "write" 3035 }`, 3036 wantNS: nil, 3037 wantErr: "Permission denied", 3038 }, 3039 { 3040 name: "ACL OK for service but not for overridden sidecar", 3041 json: ` 3042 { 3043 "name": "web", 3044 "port": 1111, 3045 "connect": { 3046 "SidecarService": { 3047 "name": "foo-sidecar-proxy" 3048 } 3049 } 3050 } 3051 `, 3052 enableACL: true, 3053 tokenRules: ` 3054 service "web-sidecar-proxy" { 3055 policy = "write" 3056 } 3057 service "web" { 3058 policy = "write" 3059 }`, 3060 wantNS: nil, 3061 wantErr: "Permission denied", 3062 }, 3063 { 3064 name: "ACL OK for service but and overridden for sidecar", 3065 // This test ensures that if the sidecar embeds it's own token with 3066 // different privs from the main request token it will be honored for the 3067 // sidecar registration. We use the test root token since that should have 3068 // permission. 
3069 json: ` 3070 { 3071 "name": "web", 3072 "port": 1111, 3073 "connect": { 3074 "SidecarService": { 3075 "name": "foo", 3076 "token": "root" 3077 } 3078 } 3079 } 3080 `, 3081 enableACL: true, 3082 tokenRules: ` 3083 service "web-sidecar-proxy" { 3084 policy = "write" 3085 } 3086 service "web" { 3087 policy = "write" 3088 }`, 3089 wantNS: testDefaultSidecar("web", 1111, func(ns *structs.NodeService) { 3090 ns.Service = "foo" 3091 }), 3092 wantErr: "", 3093 }, 3094 { 3095 name: "invalid check definition in sidecar", 3096 // Note no interval in the TCP check should fail validation 3097 json: ` 3098 { 3099 "name": "web", 3100 "port": 1111, 3101 "connect": { 3102 "SidecarService": { 3103 "check": { 3104 "TCP": "foo" 3105 } 3106 } 3107 } 3108 } 3109 `, 3110 wantNS: nil, 3111 wantErr: "invalid check in sidecar_service", 3112 }, 3113 { 3114 name: "invalid checks definitions in sidecar", 3115 // Note no interval in the TCP check should fail validation 3116 json: ` 3117 { 3118 "name": "web", 3119 "port": 1111, 3120 "connect": { 3121 "SidecarService": { 3122 "checks": [{ 3123 "TCP": "foo" 3124 }] 3125 } 3126 } 3127 } 3128 `, 3129 wantNS: nil, 3130 wantErr: "invalid check in sidecar_service", 3131 }, 3132 { 3133 name: "invalid check status in sidecar", 3134 // Note no interval in the TCP check should fail validation 3135 json: ` 3136 { 3137 "name": "web", 3138 "port": 1111, 3139 "connect": { 3140 "SidecarService": { 3141 "check": { 3142 "TCP": "foo", 3143 "Interval": 10, 3144 "Status": "unsupported-status" 3145 } 3146 } 3147 } 3148 } 3149 `, 3150 wantNS: nil, 3151 wantErr: "Status for checks must 'passing', 'warning', 'critical'", 3152 }, 3153 { 3154 name: "invalid checks status in sidecar", 3155 // Note no interval in the TCP check should fail validation 3156 json: ` 3157 { 3158 "name": "web", 3159 "port": 1111, 3160 "connect": { 3161 "SidecarService": { 3162 "checks": [{ 3163 "TCP": "foo", 3164 "Interval": 10, 3165 "Status": "unsupported-status" 3166 }] 3167 } 3168 } 3169 
} 3170 `, 3171 wantNS: nil, 3172 wantErr: "Status for checks must 'passing', 'warning', 'critical'", 3173 }, 3174 { 3175 name: "another service registered with same ID as a sidecar should not be deregistered", 3176 // Add another service with the same ID that a sidecar for web would have 3177 preRegister: &structs.NodeService{ 3178 ID: "web-sidecar-proxy", 3179 Service: "fake-sidecar", 3180 Port: 9999, 3181 }, 3182 // Register web with NO SIDECAR 3183 json: ` 3184 { 3185 "name": "web", 3186 "port": 1111 3187 } 3188 `, 3189 // Note here that although the registration here didn't register it, we 3190 // should still see the NodeService we pre-registered here. 3191 wantNS: &structs.NodeService{ 3192 ID: "web-sidecar-proxy", 3193 Service: "fake-sidecar", 3194 Port: 9999, 3195 Weights: &structs.Weights{ 3196 Passing: 1, 3197 Warning: 1, 3198 }, 3199 }, 3200 // After we deregister the web service above, the fake sidecar with 3201 // clashing ID SHOULD NOT have been removed since it wasn't part of the 3202 // original registration. 3203 wantSidecarIDLeftAfterDereg: true, 3204 }, 3205 { 3206 name: "updates to sidecar should work", 3207 // Add a valid sidecar already registered 3208 preRegister: &structs.NodeService{ 3209 ID: "web-sidecar-proxy", 3210 Service: "web-sidecar-proxy", 3211 LocallyRegisteredAsSidecar: true, 3212 Port: 9999, 3213 }, 3214 // Register web with Sidecar on different port 3215 json: ` 3216 { 3217 "name": "web", 3218 "port": 1111, 3219 "connect": { 3220 "SidecarService": { 3221 "Port": 6666 3222 } 3223 } 3224 } 3225 `, 3226 // Note here that although the registration here didn't register it, we 3227 // should still see the NodeService we pre-registered here. 
3228 wantNS: &structs.NodeService{ 3229 Kind: "connect-proxy", 3230 ID: "web-sidecar-proxy", 3231 Service: "web-sidecar-proxy", 3232 LocallyRegisteredAsSidecar: true, 3233 Port: 6666, 3234 Weights: &structs.Weights{ 3235 Passing: 1, 3236 Warning: 1, 3237 }, 3238 Proxy: structs.ConnectProxyConfig{ 3239 DestinationServiceName: "web", 3240 DestinationServiceID: "web", 3241 LocalServiceAddress: "127.0.0.1", 3242 LocalServicePort: 1111, 3243 }, 3244 }, 3245 }, 3246 { 3247 name: "update that removes sidecar should NOT deregister it", 3248 // Add web with a valid sidecar already registered 3249 preRegister: &structs.NodeService{ 3250 ID: "web", 3251 Service: "web", 3252 Port: 1111, 3253 }, 3254 preRegister2: testDefaultSidecar("web", 1111), 3255 // Register (update) web and remove sidecar (and port for sanity check) 3256 json: ` 3257 { 3258 "name": "web", 3259 "port": 2222 3260 } 3261 `, 3262 // Sidecar should still be there such that API can update registration 3263 // without accidentally removing a sidecar. This is equivalent to embedded 3264 // checks which are not removed by just not being included in an update. 3265 // We will document that sidecar registrations via API must be explicitiy 3266 // deregistered. 3267 wantNS: testDefaultSidecar("web", 1111), 3268 // Sanity check the rest of the update happened though. 
3269 assertStateFn: func(t *testing.T, state *local.State) { 3270 svcs := state.Services() 3271 svc, ok := svcs["web"] 3272 require.True(t, ok) 3273 require.Equal(t, 2222, svc.Port) 3274 }, 3275 }, 3276 } 3277 3278 for _, tt := range tests { 3279 t.Run(tt.name, func(t *testing.T) { 3280 assert := assert.New(t) 3281 require := require.New(t) 3282 3283 // Constrain auto ports to 1 available to make it deterministic 3284 hcl := `ports { 3285 sidecar_min_port = 2222 3286 sidecar_max_port = 2222 3287 } 3288 ` 3289 if tt.enableACL { 3290 hcl = hcl + TestACLConfig() 3291 } 3292 3293 a := NewTestAgent(t, t.Name(), hcl) 3294 defer a.Shutdown() 3295 testrpc.WaitForLeader(t, a.RPC, "dc1") 3296 3297 if tt.preRegister != nil { 3298 require.NoError(a.AddService(tt.preRegister, nil, false, "", ConfigSourceLocal)) 3299 } 3300 if tt.preRegister2 != nil { 3301 require.NoError(a.AddService(tt.preRegister2, nil, false, "", ConfigSourceLocal)) 3302 } 3303 3304 // Create an ACL token with require policy 3305 var token string 3306 if tt.enableACL && tt.tokenRules != "" { 3307 token = testCreateToken(t, a, tt.tokenRules) 3308 } 3309 3310 br := bytes.NewBufferString(tt.json) 3311 3312 req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token="+token, br) 3313 resp := httptest.NewRecorder() 3314 obj, err := a.srv.AgentRegisterService(resp, req) 3315 if tt.wantErr != "" { 3316 require.Error(err, "response code=%d, body:\n%s", 3317 resp.Code, resp.Body.String()) 3318 require.Contains(strings.ToLower(err.Error()), strings.ToLower(tt.wantErr)) 3319 return 3320 } 3321 require.NoError(err) 3322 assert.Nil(obj) 3323 require.Equal(200, resp.Code, "request failed with body: %s", 3324 resp.Body.String()) 3325 3326 // Sanity the target service registration 3327 svcs := a.State.Services() 3328 3329 // Parse the expected definition into a ServiceDefinition 3330 var sd structs.ServiceDefinition 3331 err = json.Unmarshal([]byte(tt.json), &sd) 3332 require.NoError(err) 3333 
require.NotEmpty(sd.Name) 3334 3335 svcID := sd.ID 3336 if svcID == "" { 3337 svcID = sd.Name 3338 } 3339 svc, ok := svcs[svcID] 3340 require.True(ok, "has service "+svcID) 3341 assert.Equal(sd.Name, svc.Service) 3342 assert.Equal(sd.Port, svc.Port) 3343 // Ensure that the actual registered service _doesn't_ still have it's 3344 // sidecar info since it's duplicate and we don't want that synced up to 3345 // the catalog or included in responses particularly - it's just 3346 // registration syntax sugar. 3347 assert.Nil(svc.Connect.SidecarService) 3348 3349 if tt.wantNS == nil { 3350 // Sanity check that there was no service registered, we rely on there 3351 // being no services at start of test so we can just use the count. 3352 assert.Len(svcs, 1, "should be no sidecar registered") 3353 return 3354 } 3355 3356 // Ensure sidecar 3357 svc, ok = svcs[tt.wantNS.ID] 3358 require.True(ok, "no sidecar registered at "+tt.wantNS.ID) 3359 assert.Equal(tt.wantNS, svc) 3360 3361 if tt.assertStateFn != nil { 3362 tt.assertStateFn(t, a.State) 3363 } 3364 3365 // Now verify deregistration also removes sidecar (if there was one and it 3366 // was added via sidecar not just coincidental ID clash) 3367 { 3368 req := httptest.NewRequest("PUT", 3369 "/v1/agent/service/deregister/"+svcID+"?token="+token, nil) 3370 resp := httptest.NewRecorder() 3371 obj, err := a.srv.AgentDeregisterService(resp, req) 3372 require.NoError(err) 3373 require.Nil(obj) 3374 3375 svcs := a.State.Services() 3376 svc, ok = svcs[tt.wantNS.ID] 3377 if tt.wantSidecarIDLeftAfterDereg { 3378 require.True(ok, "removed non-sidecar service at "+tt.wantNS.ID) 3379 } else { 3380 require.False(ok, "sidecar not deregistered with service "+svcID) 3381 } 3382 } 3383 }) 3384 } 3385 } 3386 3387 // This tests that connect proxy validation is done for local agent 3388 // registration. This doesn't need to test validation exhaustively since 3389 // that is done via a table test in the structs package. 
func TestAgent_RegisterService_UnmanagedConnectProxyInvalid(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// A connect-proxy service definition with no Port set; registration is
	// expected to be rejected with a 400 mentioning the missing Port.
	args := &structs.ServiceDefinition{
		Kind: structs.ServiceKindConnectProxy,
		Name: "connect-proxy",
		Proxy: &structs.ConnectProxyConfig{
			DestinationServiceName: "db",
		},
		Check: structs.CheckType{
			TTL: 15 * time.Second,
		},
	}

	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))
	resp := httptest.NewRecorder()
	obj, err := a.srv.AgentRegisterService(resp, req)
	assert.Nil(err)
	assert.Nil(obj)
	assert.Equal(http.StatusBadRequest, resp.Code)
	assert.Contains(resp.Body.String(), "Port")

	// Ensure the service doesn't exist
	_, ok := a.State.Services()["connect-proxy"]
	assert.False(ok)
}

// Tests agent registration of a service that is connect native.
func TestAgent_RegisterService_ConnectNative(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register a proxy. Note that the destination doesn't exist here on
	// this agent or in the catalog at all. This is intended and part
	// of the design.
	args := &structs.ServiceDefinition{
		Name: "web",
		Port: 8000,
		Check: structs.CheckType{
			TTL: 15 * time.Second,
		},
		Connect: &structs.ServiceConnect{
			Native: true,
		},
	}

	req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
	resp := httptest.NewRecorder()
	obj, err := a.srv.AgentRegisterService(resp, req)
	assert.Nil(err)
	assert.Nil(obj)

	// Ensure the service was registered and kept its connect-native flag.
	svc, ok := a.State.Services()["web"]
	assert.True(ok, "has service")
	assert.True(svc.Connect.Native)
}

// Registering a service whose check uses ScriptArgs must be refused when
// script checks are disabled on the agent (the default config here).
func TestAgent_RegisterService_ScriptCheck_ExecDisable(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	args := &structs.ServiceDefinition{
		Name: "test",
		Meta: map[string]string{"hello": "world"},
		Tags: []string{"master"},
		Port: 8000,
		Check: structs.CheckType{
			Name:       "test-check",
			Interval:   time.Second,
			ScriptArgs: []string{"true"},
		},
		Weights: &structs.Weights{
			Passing: 100,
			Warning: 3,
		},
	}
	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))

	_, err := a.srv.AgentRegisterService(nil, req)
	if err == nil {
		t.Fatalf("expected error but got nil")
	}
	if !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
		t.Fatalf("expected script disabled error, got: %s", err)
	}
	// The check must not have been registered either.
	checkID := types.CheckID("test-check")
	if _, ok := a.State.Checks()[checkID]; ok {
		t.Fatalf("check registered with exec disable")
	}
}

// Same as above but with only local script checks enabled: a check arriving
// via the (remote-capable) HTTP API must still be rejected.
func TestAgent_RegisterService_ScriptCheck_ExecRemoteDisable(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), `
		enable_local_script_checks = true
	`)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	args :=
	&structs.ServiceDefinition{
		Name: "test",
		Meta: map[string]string{"hello": "world"},
		Tags: []string{"master"},
		Port: 8000,
		Check: structs.CheckType{
			Name:       "test-check",
			Interval:   time.Second,
			ScriptArgs: []string{"true"},
		},
		Weights: &structs.Weights{
			Passing: 100,
			Warning: 3,
		},
	}
	req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args))

	_, err := a.srv.AgentRegisterService(nil, req)
	if err == nil {
		t.Fatalf("expected error but got nil")
	}
	if !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
		t.Fatalf("expected script disabled error, got: %s", err)
	}
	// The check must not have been registered either.
	checkID := types.CheckID("test-check")
	if _, ok := a.State.Checks()[checkID]; ok {
		t.Fatalf("check registered with exec disable")
	}
}

// Deregistering a service removes both the service and its checks from
// local agent state.
func TestAgent_DeregisterService(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	service := &structs.NodeService{
		ID:      "test",
		Service: "test",
	}
	if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test", nil)
	obj, err := a.srv.AgentDeregisterService(nil, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if obj != nil {
		t.Fatalf("bad: %v", obj)
	}

	// Ensure neither the service nor any check mapping remains
	if _, ok := a.State.Services()["test"]; ok {
		t.Fatalf("have test service")
	}

	if _, ok := a.State.Checks()["test"]; ok {
		t.Fatalf("have test check")
	}
}

// Deregistration is denied without a token and allowed with the root token
// when ACLs are enabled.
func TestAgent_DeregisterService_ACLDeny(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), TestACLConfig())
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	service := &structs.NodeService{
		ID:      "test",
		Service: "test",
	}
	if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	t.Run("no token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test", nil)
		if _, err := a.srv.AgentDeregisterService(nil, req); !acl.IsErrPermissionDenied(err) {
			t.Fatalf("err: %v", err)
		}
	})

	t.Run("root token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test?token=root", nil)
		if _, err := a.srv.AgentDeregisterService(nil, req); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
}

// Deregistering a service that has a managed proxy also removes the proxy
// and its synthetic proxy service.
func TestAgent_DeregisterService_withManagedProxy(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	a := NewTestAgent(t, t.Name(), `
		connect {
			proxy {
				allow_managed_api_registration = true
			}
		}
	`)

	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register a service with a managed proxy
	{
		reg := &structs.ServiceDefinition{
			ID:      "test-id",
			Name:    "test",
			Address: "127.0.0.1",
			Port:    8000,
			Check: structs.CheckType{
				TTL: 15 * time.Second,
			},
			Connect: &structs.ServiceConnect{
				Proxy: &structs.ServiceDefinitionConnectProxy{},
			},
		}

		req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(reg))
		resp := httptest.NewRecorder()
		_, err := a.srv.AgentRegisterService(resp, req)
		require.NoError(err)
		require.Equal(200, resp.Code, "body: %s", resp.Body.String())
	}

	// Get the proxy ID
	require.Len(a.State.Proxies(), 1)
	var proxyID string
	for _, p := range a.State.Proxies() {
		proxyID = p.Proxy.ProxyService.ID
	}

	req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test-id", nil)
	obj, err :=
	a.srv.AgentDeregisterService(nil, req)
	require.NoError(err)
	require.Nil(obj)

	// Ensure we have no service, check, managed proxy, or proxy service
	require.NotContains(a.State.Services(), "test-id")
	require.NotContains(a.State.Checks(), "test-id")
	require.NotContains(a.State.Services(), proxyID)
	require.Len(a.State.Proxies(), 0)
}

// Test that we can't deregister a managed proxy service directly.
func TestAgent_DeregisterService_managedProxyDirect(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	a := NewTestAgent(t, t.Name(), `
		connect {
			proxy {
				allow_managed_api_registration = true
			}
		}
	`)

	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register a service with a managed proxy
	{
		reg := &structs.ServiceDefinition{
			ID:      "test-id",
			Name:    "test",
			Address: "127.0.0.1",
			Port:    8000,
			Check: structs.CheckType{
				TTL: 15 * time.Second,
			},
			Connect: &structs.ServiceConnect{
				Proxy: &structs.ServiceDefinitionConnectProxy{},
			},
		}

		req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(reg))
		resp := httptest.NewRecorder()
		_, err := a.srv.AgentRegisterService(resp, req)
		require.NoError(err)
		require.Equal(200, resp.Code, "body: %s", resp.Body.String())
	}

	// Get the proxy ID
	var proxyID string
	for _, p := range a.State.Proxies() {
		proxyID = p.Proxy.ProxyService.ID
	}

	// Deregistering the synthetic proxy service itself must fail.
	req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/"+proxyID, nil)
	obj, err := a.srv.AgentDeregisterService(nil, req)
	require.Error(err)
	require.Nil(obj)
}

// Maintenance endpoint returns 400 for missing enable flag or service ID,
// and 404 for an unknown service.
func TestAgent_ServiceMaintenance_BadRequest(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	t.Run("not enabled", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test", nil)
		resp := httptest.NewRecorder()
		if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil {
			t.Fatalf("err: %s", err)
		}
		if resp.Code != 400 {
			t.Fatalf("expected 400, got %d", resp.Code)
		}
	})

	t.Run("no service id", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/?enable=true", nil)
		resp := httptest.NewRecorder()
		if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil {
			t.Fatalf("err: %s", err)
		}
		if resp.Code != 400 {
			t.Fatalf("expected 400, got %d", resp.Code)
		}
	})

	t.Run("bad service id", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/_nope_?enable=true", nil)
		resp := httptest.NewRecorder()
		if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil {
			t.Fatalf("err: %s", err)
		}
		if resp.Code != 404 {
			t.Fatalf("expected 404, got %d", resp.Code)
		}
	})
}

// Enabling maintenance mode registers a critical maintenance check carrying
// the supplied reason and token.
func TestAgent_ServiceMaintenance_Enable(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register the service
	service := &structs.NodeService{
		ID:      "test",
		Service: "test",
	}
	if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Force the service into maintenance mode
	req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken&token=mytoken", nil)
	resp := httptest.NewRecorder()
	if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil {
		t.Fatalf("err: %s", err)
	}
	if resp.Code != 200 {
		t.Fatalf("expected 200, got %d", resp.Code)
	}

	// Ensure the maintenance check was
	// registered
	checkID := serviceMaintCheckID("test")
	check, ok := a.State.Checks()[checkID]
	if !ok {
		t.Fatalf("should have registered maintenance check")
	}

	// Ensure the token was added
	if token := a.State.CheckToken(checkID); token != "mytoken" {
		t.Fatalf("expected 'mytoken', got '%s'", token)
	}

	// Ensure the reason was set in notes
	if check.Notes != "broken" {
		t.Fatalf("bad: %#v", check)
	}
}

// Disabling maintenance mode removes the maintenance check again.
func TestAgent_ServiceMaintenance_Disable(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register the service
	service := &structs.NodeService{
		ID:      "test",
		Service: "test",
	}
	if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Force the service into maintenance mode
	if err := a.EnableServiceMaintenance("test", "", ""); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Leave maintenance mode
	req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=false", nil)
	resp := httptest.NewRecorder()
	if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil {
		t.Fatalf("err: %s", err)
	}
	if resp.Code != 200 {
		t.Fatalf("expected 200, got %d", resp.Code)
	}

	// Ensure the maintenance check was removed
	checkID := serviceMaintCheckID("test")
	if _, ok := a.State.Checks()[checkID]; ok {
		t.Fatalf("should have removed maintenance check")
	}
}

// Service maintenance is denied without a token and allowed with root when
// ACLs are enabled.
func TestAgent_ServiceMaintenance_ACLDeny(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), TestACLConfig())
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Register the service.
	service := &structs.NodeService{
		ID:      "test",
		Service: "test",
	}
	if err := a.AddService(service, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	t.Run("no token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken", nil)
		if _, err := a.srv.AgentServiceMaintenance(nil, req); !acl.IsErrPermissionDenied(err) {
			t.Fatalf("err: %v", err)
		}
	})

	t.Run("root token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken&token=root", nil)
		if _, err := a.srv.AgentServiceMaintenance(nil, req); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
}

// Node maintenance endpoint returns 400 when the enable flag is missing.
func TestAgent_NodeMaintenance_BadRequest(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Fails when no enable flag provided
	req, _ := http.NewRequest("PUT", "/v1/agent/self/maintenance", nil)
	resp := httptest.NewRecorder()
	if _, err := a.srv.AgentNodeMaintenance(resp, req); err != nil {
		t.Fatalf("err: %s", err)
	}
	if resp.Code != 400 {
		t.Fatalf("expected 400, got %d", resp.Code)
	}
}

// Enabling node maintenance registers the node maintenance check with the
// supplied reason and token.
func TestAgent_NodeMaintenance_Enable(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Force the node into maintenance mode
	req, _ := http.NewRequest("PUT", "/v1/agent/self/maintenance?enable=true&reason=broken&token=mytoken", nil)
	resp := httptest.NewRecorder()
	if _, err := a.srv.AgentNodeMaintenance(resp, req); err != nil {
		t.Fatalf("err: %s", err)
	}
	if resp.Code != 200 {
		t.Fatalf("expected 200, got %d", resp.Code)
	}

	// Ensure the maintenance check was registered
	check, ok := a.State.Checks()[structs.NodeMaint]
	if !ok {
		t.Fatalf("should have registered maintenance check")
	}

	// Check that the token was used
	if token := a.State.CheckToken(structs.NodeMaint); token != "mytoken" {
		t.Fatalf("expected 'mytoken', got '%s'", token)
	}

	// Ensure the reason was set in notes
	if check.Notes != "broken" {
		t.Fatalf("bad: %#v", check)
	}
}

// Disabling node maintenance removes the node maintenance check.
func TestAgent_NodeMaintenance_Disable(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Force the node into maintenance mode
	a.EnableNodeMaintenance("", "")

	// Leave maintenance mode
	req, _ := http.NewRequest("PUT", "/v1/agent/self/maintenance?enable=false", nil)
	resp := httptest.NewRecorder()
	if _, err := a.srv.AgentNodeMaintenance(resp, req); err != nil {
		t.Fatalf("err: %s", err)
	}
	if resp.Code != 200 {
		t.Fatalf("expected 200, got %d", resp.Code)
	}

	// Ensure the maintenance check was removed
	if _, ok := a.State.Checks()[structs.NodeMaint]; ok {
		t.Fatalf("should have removed maintenance check")
	}
}

// Node maintenance is denied without a token and allowed with root when
// ACLs are enabled.
func TestAgent_NodeMaintenance_ACLDeny(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), TestACLConfig())
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	t.Run("no token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/self/maintenance?enable=true&reason=broken", nil)
		if _, err := a.srv.AgentNodeMaintenance(nil, req); !acl.IsErrPermissionDenied(err) {
			t.Fatalf("err: %v", err)
		}
	})

	t.Run("root token", func(t *testing.T) {
		req, _ := http.NewRequest("PUT", "/v1/agent/self/maintenance?enable=true&reason=broken&token=root", nil)
		if _, err := a.srv.AgentNodeMaintenance(nil, req); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
3941 } 3942 3943 func TestAgent_RegisterCheck_Service(t *testing.T) { 3944 t.Parallel() 3945 a := NewTestAgent(t, t.Name(), "") 3946 defer a.Shutdown() 3947 testrpc.WaitForTestAgent(t, a.RPC, "dc1") 3948 3949 args := &structs.ServiceDefinition{ 3950 Name: "memcache", 3951 Port: 8000, 3952 Check: structs.CheckType{ 3953 TTL: 15 * time.Second, 3954 }, 3955 } 3956 3957 // First register the service 3958 req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) 3959 if _, err := a.srv.AgentRegisterService(nil, req); err != nil { 3960 t.Fatalf("err: %v", err) 3961 } 3962 3963 // Now register an additional check 3964 checkArgs := &structs.CheckDefinition{ 3965 Name: "memcache_check2", 3966 ServiceID: "memcache", 3967 TTL: 15 * time.Second, 3968 } 3969 req, _ = http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(checkArgs)) 3970 if _, err := a.srv.AgentRegisterCheck(nil, req); err != nil { 3971 t.Fatalf("err: %v", err) 3972 } 3973 3974 // Ensure we have a check mapping 3975 result := a.State.Checks() 3976 if _, ok := result["service:memcache"]; !ok { 3977 t.Fatalf("missing memcached check") 3978 } 3979 if _, ok := result["memcache_check2"]; !ok { 3980 t.Fatalf("missing memcache_check2 check") 3981 } 3982 3983 // Make sure the new check is associated with the service 3984 if result["memcache_check2"].ServiceID != "memcache" { 3985 t.Fatalf("bad: %#v", result["memcached_check2"]) 3986 } 3987 } 3988 3989 func TestAgent_Monitor(t *testing.T) { 3990 t.Parallel() 3991 logWriter := logger.NewLogWriter(512) 3992 a := &TestAgent{ 3993 Name: t.Name(), 3994 LogWriter: logWriter, 3995 LogOutput: io.MultiWriter(os.Stderr, logWriter), 3996 } 3997 a.Start(t) 3998 defer a.Shutdown() 3999 testrpc.WaitForTestAgent(t, a.RPC, "dc1") 4000 4001 // Try passing an invalid log level 4002 req, _ := http.NewRequest("GET", "/v1/agent/monitor?loglevel=invalid", nil) 4003 resp := newClosableRecorder() 4004 if _, err := a.srv.AgentMonitor(resp, req); err != nil { 4005 
t.Fatalf("err: %v", err) 4006 } 4007 if resp.Code != 400 { 4008 t.Fatalf("bad: %v", resp.Code) 4009 } 4010 body, _ := ioutil.ReadAll(resp.Body) 4011 if !strings.Contains(string(body), "Unknown log level") { 4012 t.Fatalf("bad: %s", body) 4013 } 4014 4015 // Try to stream logs until we see the expected log line 4016 retry.Run(t, func(r *retry.R) { 4017 req, _ = http.NewRequest("GET", "/v1/agent/monitor?loglevel=debug", nil) 4018 resp = newClosableRecorder() 4019 done := make(chan struct{}) 4020 go func() { 4021 if _, err := a.srv.AgentMonitor(resp, req); err != nil { 4022 t.Fatalf("err: %s", err) 4023 } 4024 close(done) 4025 }() 4026 4027 resp.Close() 4028 <-done 4029 4030 got := resp.Body.Bytes() 4031 want := []byte("raft: Initial configuration (index=1)") 4032 if !bytes.Contains(got, want) { 4033 r.Fatalf("got %q and did not find %q", got, want) 4034 } 4035 }) 4036 } 4037 4038 type closableRecorder struct { 4039 *httptest.ResponseRecorder 4040 closer chan bool 4041 } 4042 4043 func newClosableRecorder() *closableRecorder { 4044 r := httptest.NewRecorder() 4045 closer := make(chan bool) 4046 return &closableRecorder{r, closer} 4047 } 4048 4049 func (r *closableRecorder) Close() { 4050 close(r.closer) 4051 } 4052 4053 func (r *closableRecorder) CloseNotify() <-chan bool { 4054 return r.closer 4055 } 4056 4057 func TestAgent_Monitor_ACLDeny(t *testing.T) { 4058 t.Parallel() 4059 a := NewTestAgent(t, t.Name(), TestACLConfig()) 4060 defer a.Shutdown() 4061 testrpc.WaitForLeader(t, a.RPC, "dc1") 4062 4063 // Try without a token. 4064 req, _ := http.NewRequest("GET", "/v1/agent/monitor", nil) 4065 if _, err := a.srv.AgentMonitor(nil, req); !acl.IsErrPermissionDenied(err) { 4066 t.Fatalf("err: %v", err) 4067 } 4068 4069 // This proves we call the ACL function, and we've got the other monitor 4070 // test to prove monitor works, which should be sufficient. The monitor 4071 // logic is a little complex to set up so isn't worth repeating again 4072 // here. 
	// here.
}

// TestAgent_Token exercises /v1/agent/token/<name>: both the legacy names
// (acl_token, acl_agent_token, ...) and the modern ones (default, agent,
// agent_master, replication) set and clear the corresponding token in the
// agent's token store.
func TestAgent_Token(t *testing.T) {
	t.Parallel()

	// The behavior of this handler when ACLs are disabled is vetted over
	// in TestACL_Disabled_Response since there's already good infra set
	// up over there to test this, and it calls the common function.
	a := NewTestAgent(t, t.Name(), TestACLConfig()+`
		acl {
			tokens {
				default = ""
				agent = ""
				agent_master = ""
				replication = ""
			}
		}
	`)
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// tokens holds one raw value + source per token slot.
	type tokens struct {
		user         string
		userSource   tokenStore.TokenSource
		agent        string
		agentSource  tokenStore.TokenSource
		master       string
		masterSource tokenStore.TokenSource
		repl         string
		replSource   tokenStore.TokenSource
	}

	// resetTokens puts the store into a known starting state for each case.
	resetTokens := func(init tokens) {
		a.tokens.UpdateUserToken(init.user, init.userSource)
		a.tokens.UpdateAgentToken(init.agent, init.agentSource)
		a.tokens.UpdateAgentMasterToken(init.master, init.masterSource)
		a.tokens.UpdateReplicationToken(init.repl, init.replSource)
	}

	body := func(token string) io.Reader {
		return jsonReader(&api.AgentToken{Token: token})
	}

	badJSON := func() io.Reader {
		return jsonReader(false)
	}

	tests := []struct {
		name        string
		method, url string
		body        io.Reader
		code        int
		init        tokens // store state before the request
		raw         tokens // expected raw values + sources after
		effective   tokens // expected effective tokens after fallbacks
	}{
		{
			name:   "bad token name",
			method: "PUT",
			url:    "nope?token=root",
			body:   body("X"),
			code:   http.StatusNotFound,
		},
		{
			name:   "bad JSON",
			method: "PUT",
			url:    "acl_token?token=root",
			body:   badJSON(),
			code:   http.StatusBadRequest,
		},
		{
			name:      "set user legacy",
			method:    "PUT",
			url:       "acl_token?token=root",
			body:      body("U"),
			code:      http.StatusOK,
			raw:       tokens{user: "U", userSource: tokenStore.TokenSourceAPI},
			effective: tokens{user: "U", agent: "U"},
		},
		{
			name:      "set default",
			method:    "PUT",
			url:       "default?token=root",
			body:      body("U"),
			code:      http.StatusOK,
			raw:       tokens{user: "U", userSource: tokenStore.TokenSourceAPI},
			effective: tokens{user: "U", agent: "U"},
		},
		{
			name:      "set agent legacy",
			method:    "PUT",
			url:       "acl_agent_token?token=root",
			body:      body("A"),
			code:      http.StatusOK,
			init:      tokens{user: "U", agent: "U"},
			raw:       tokens{user: "U", agent: "A", agentSource: tokenStore.TokenSourceAPI},
			effective: tokens{user: "U", agent: "A"},
		},
		{
			name:      "set agent",
			method:    "PUT",
			url:       "agent?token=root",
			body:      body("A"),
			code:      http.StatusOK,
			init:      tokens{user: "U", agent: "U"},
			raw:       tokens{user: "U", agent: "A", agentSource: tokenStore.TokenSourceAPI},
			effective: tokens{user: "U", agent: "A"},
		},
		{
			name:      "set master legacy",
			method:    "PUT",
			url:       "acl_agent_master_token?token=root",
			body:      body("M"),
			code:      http.StatusOK,
			raw:       tokens{master: "M", masterSource: tokenStore.TokenSourceAPI},
			effective: tokens{master: "M"},
		},
		{
			name:      "set master ",
			method:    "PUT",
			url:       "agent_master?token=root",
			body:      body("M"),
			code:      http.StatusOK,
			raw:       tokens{master: "M", masterSource: tokenStore.TokenSourceAPI},
			effective: tokens{master: "M"},
		},
		{
			name:      "set repl legacy",
			method:    "PUT",
			url:       "acl_replication_token?token=root",
			body:      body("R"),
			code:      http.StatusOK,
			raw:       tokens{repl: "R", replSource: tokenStore.TokenSourceAPI},
			effective: tokens{repl: "R"},
		},
		{
			name:      "set repl",
			method:    "PUT",
			url:       "replication?token=root",
			body:      body("R"),
			code:      http.StatusOK,
			raw:       tokens{repl: "R", replSource: tokenStore.TokenSourceAPI},
			effective: tokens{repl: "R"},
		},
		{
			name:   "clear user legacy",
			method: "PUT",
			url:    "acl_token?token=root",
			body:   body(""),
			code:   http.StatusOK,
			init:   tokens{user: "U"},
			raw:    tokens{userSource: tokenStore.TokenSourceAPI},
		},
		{
			name:   "clear default",
			method: "PUT",
			url:    "default?token=root",
			body:   body(""),
			code:   http.StatusOK,
			init:   tokens{user: "U"},
			raw:    tokens{userSource: tokenStore.TokenSourceAPI},
		},
		{
			name:   "clear agent legacy",
			method: "PUT",
			url:    "acl_agent_token?token=root",
			body:   body(""),
			code:   http.StatusOK,
			init:   tokens{agent: "A"},
			raw:    tokens{agentSource: tokenStore.TokenSourceAPI},
		},
		{
			name:   "clear agent",
			method: "PUT",
			url:    "agent?token=root",
			body:   body(""),
			code:   http.StatusOK,
			init:   tokens{agent: "A"},
			raw:    tokens{agentSource: tokenStore.TokenSourceAPI},
		},
		{
			name:   "clear master legacy",
			method: "PUT",
			url:    "acl_agent_master_token?token=root",
			body:   body(""),
			code:   http.StatusOK,
			init:   tokens{master: "M"},
			raw:    tokens{masterSource: tokenStore.TokenSourceAPI},
		},
		{
			name:   "clear master",
			method: "PUT",
			url:    "agent_master?token=root",
			body:   body(""),
			code:   http.StatusOK,
			init:   tokens{master: "M"},
			raw:    tokens{masterSource: tokenStore.TokenSourceAPI},
		},
		{
			name:   "clear repl legacy",
			method: "PUT",
			url:    "acl_replication_token?token=root",
			body:   body(""),
			code:   http.StatusOK,
			init:   tokens{repl: "R"},
			raw:    tokens{replSource: tokenStore.TokenSourceAPI},
		},
		{
			name:   "clear repl",
			method: "PUT",
			url:    "replication?token=root",
			body:   body(""),
			code:   http.StatusOK,
			init:   tokens{repl: "R"},
			raw:    tokens{replSource: tokenStore.TokenSourceAPI},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			resetTokens(tt.init)
			url := fmt.Sprintf("/v1/agent/token/%s", tt.url)
			resp := httptest.NewRecorder()
			req, _ := http.NewRequest(tt.method, url, tt.body)
			_, err := a.srv.AgentToken(resp, req)
			require.NoError(t, err)
			require.Equal(t, tt.code, resp.Code)
			// Effective tokens include the user->agent fallback.
			require.Equal(t, tt.effective.user, a.tokens.UserToken())
			require.Equal(t, tt.effective.agent, a.tokens.AgentToken())
			require.Equal(t, tt.effective.master, a.tokens.AgentMasterToken())
			require.Equal(t, tt.effective.repl, a.tokens.ReplicationToken())

			// Raw values and their sources.
			tok, src := a.tokens.UserTokenAndSource()
			require.Equal(t, tt.raw.user, tok)
			require.Equal(t, tt.raw.userSource, src)

			tok, src = a.tokens.AgentTokenAndSource()
			require.Equal(t, tt.raw.agent, tok)
			require.Equal(t, tt.raw.agentSource, src)

			tok, src = a.tokens.AgentMasterTokenAndSource()
			require.Equal(t, tt.raw.master, tok)
			require.Equal(t, tt.raw.masterSource, src)

			tok, src = a.tokens.ReplicationTokenAndSource()
			require.Equal(t, tt.raw.repl, tok)
			require.Equal(t, tt.raw.replSource, src)
		})
	}

	// This one returns an error that is interpreted by the HTTP wrapper, so
	// doesn't fit into our table above.
	t.Run("permission denied", func(t *testing.T) {
		resetTokens(tokens{})
		req, _ := http.NewRequest("PUT", "/v1/agent/token/acl_token", body("X"))
		_, err := a.srv.AgentToken(nil, req)
		require.True(t, acl.IsErrPermissionDenied(err))
		require.Equal(t, "", a.tokens.UserToken())
	})
}

// The CA roots endpoint errors when Connect is disabled.
func TestAgentConnectCARoots_empty(t *testing.T) {
	t.Parallel()

	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "connect { enabled = false }")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil)
	resp := httptest.NewRecorder()
	_, err := a.srv.AgentConnectCARoots(resp, req)
	require.Error(err)
	require.Contains(err.Error(), "Connect must be enabled")
}

func TestAgentConnectCARoots_list(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Set some CAs. Note that NewTestAgent already bootstraps one CA so this just
	// adds a second and makes it active.
	ca2 := connect.TestCAConfigSet(t, a, nil)

	// List
	req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.AgentConnectCARoots(resp, req)
	require.NoError(err)

	value := obj.(structs.IndexedCARoots)
	assert.Equal(value.ActiveRootID, ca2.ID)
	// Would like to assert that it's the same as the TestAgent domain but the
	// only way to access that state via this package is by RPC to the server
	// implementation running in TestAgent which is more or less a tautology.
4372 assert.NotEmpty(value.TrustDomain) 4373 assert.Len(value.Roots, 2) 4374 4375 // We should never have the secret information 4376 for _, r := range value.Roots { 4377 assert.Equal("", r.SigningCert) 4378 assert.Equal("", r.SigningKey) 4379 } 4380 4381 assert.Equal("MISS", resp.Header().Get("X-Cache")) 4382 4383 // Test caching 4384 { 4385 // List it again 4386 resp2 := httptest.NewRecorder() 4387 obj2, err := a.srv.AgentConnectCARoots(resp2, req) 4388 require.NoError(err) 4389 assert.Equal(obj, obj2) 4390 4391 // Should cache hit this time and not make request 4392 assert.Equal("HIT", resp2.Header().Get("X-Cache")) 4393 } 4394 4395 // Test that caching is updated in the background 4396 { 4397 // Set a new CA 4398 ca := connect.TestCAConfigSet(t, a, nil) 4399 4400 retry.Run(t, func(r *retry.R) { 4401 // List it again 4402 resp := httptest.NewRecorder() 4403 obj, err := a.srv.AgentConnectCARoots(resp, req) 4404 r.Check(err) 4405 4406 value := obj.(structs.IndexedCARoots) 4407 if ca.ID != value.ActiveRootID { 4408 r.Fatalf("%s != %s", ca.ID, value.ActiveRootID) 4409 } 4410 // There are now 3 CAs because we didn't complete rotation on the original 4411 // 2 4412 if len(value.Roots) != 3 { 4413 r.Fatalf("bad len: %d", len(value.Roots)) 4414 } 4415 4416 // Should be a cache hit! The data should've updated in the cache 4417 // in the background so this should've been fetched directly from 4418 // the cache. 
4419 if resp.Header().Get("X-Cache") != "HIT" { 4420 r.Fatalf("should be a cache hit") 4421 } 4422 }) 4423 } 4424 } 4425 4426 func TestAgentConnectCALeafCert_aclDefaultDeny(t *testing.T) { 4427 t.Parallel() 4428 4429 require := require.New(t) 4430 a := NewTestAgent(t, t.Name(), TestACLConfig()+testAllowProxyConfig()) 4431 defer a.Shutdown() 4432 testrpc.WaitForLeader(t, a.RPC, "dc1") 4433 4434 // Register a service with a managed proxy 4435 { 4436 reg := &structs.ServiceDefinition{ 4437 ID: "test-id", 4438 Name: "test", 4439 Address: "127.0.0.1", 4440 Port: 8000, 4441 Check: structs.CheckType{ 4442 TTL: 15 * time.Second, 4443 }, 4444 Connect: &structs.ServiceConnect{ 4445 Proxy: &structs.ServiceDefinitionConnectProxy{}, 4446 }, 4447 } 4448 4449 req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) 4450 resp := httptest.NewRecorder() 4451 _, err := a.srv.AgentRegisterService(resp, req) 4452 require.NoError(err) 4453 require.Equal(200, resp.Code, "body: %s", resp.Body.String()) 4454 } 4455 4456 req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) 4457 resp := httptest.NewRecorder() 4458 _, err := a.srv.AgentConnectCALeafCert(resp, req) 4459 require.Error(err) 4460 require.True(acl.IsErrPermissionDenied(err)) 4461 } 4462 4463 func TestAgentConnectCALeafCert_aclProxyToken(t *testing.T) { 4464 t.Parallel() 4465 4466 require := require.New(t) 4467 a := NewTestAgent(t, t.Name(), TestACLConfig()+testAllowProxyConfig()) 4468 defer a.Shutdown() 4469 testrpc.WaitForLeader(t, a.RPC, "dc1") 4470 4471 // Register a service with a managed proxy 4472 { 4473 reg := &structs.ServiceDefinition{ 4474 ID: "test-id", 4475 Name: "test", 4476 Address: "127.0.0.1", 4477 Port: 8000, 4478 Check: structs.CheckType{ 4479 TTL: 15 * time.Second, 4480 }, 4481 Connect: &structs.ServiceConnect{ 4482 Proxy: &structs.ServiceDefinitionConnectProxy{}, 4483 }, 4484 } 4485 4486 req, _ := http.NewRequest("PUT", 
"/v1/agent/service/register?token=root", jsonReader(reg)) 4487 resp := httptest.NewRecorder() 4488 _, err := a.srv.AgentRegisterService(resp, req) 4489 require.NoError(err) 4490 require.Equal(200, resp.Code, "body: %s", resp.Body.String()) 4491 } 4492 4493 // Get the proxy token from the agent directly, since there is no API. 4494 proxy := a.State.Proxy("test-id-proxy") 4495 require.NotNil(proxy) 4496 token := proxy.ProxyToken 4497 require.NotEmpty(token) 4498 4499 req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?token="+token, nil) 4500 resp := httptest.NewRecorder() 4501 obj, err := a.srv.AgentConnectCALeafCert(resp, req) 4502 require.NoError(err) 4503 4504 // Get the issued cert 4505 _, ok := obj.(*structs.IssuedCert) 4506 require.True(ok) 4507 } 4508 4509 func TestAgentConnectCALeafCert_aclProxyTokenOther(t *testing.T) { 4510 t.Parallel() 4511 4512 require := require.New(t) 4513 a := NewTestAgent(t, t.Name(), TestACLConfig()+testAllowProxyConfig()) 4514 defer a.Shutdown() 4515 testrpc.WaitForLeader(t, a.RPC, "dc1") 4516 4517 // Register a service with a managed proxy 4518 { 4519 reg := &structs.ServiceDefinition{ 4520 ID: "test-id", 4521 Name: "test", 4522 Address: "127.0.0.1", 4523 Port: 8000, 4524 Check: structs.CheckType{ 4525 TTL: 15 * time.Second, 4526 }, 4527 Connect: &structs.ServiceConnect{ 4528 Proxy: &structs.ServiceDefinitionConnectProxy{}, 4529 }, 4530 } 4531 4532 req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) 4533 resp := httptest.NewRecorder() 4534 _, err := a.srv.AgentRegisterService(resp, req) 4535 require.NoError(err) 4536 require.Equal(200, resp.Code, "body: %s", resp.Body.String()) 4537 } 4538 4539 // Register another service 4540 { 4541 reg := &structs.ServiceDefinition{ 4542 ID: "wrong-id", 4543 Name: "wrong", 4544 Address: "127.0.0.1", 4545 Port: 8000, 4546 Check: structs.CheckType{ 4547 TTL: 15 * time.Second, 4548 }, 4549 Connect: &structs.ServiceConnect{ 4550 Proxy: 
&structs.ServiceDefinitionConnectProxy{}, 4551 }, 4552 } 4553 4554 req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) 4555 resp := httptest.NewRecorder() 4556 _, err := a.srv.AgentRegisterService(resp, req) 4557 require.NoError(err) 4558 require.Equal(200, resp.Code, "body: %s", resp.Body.String()) 4559 } 4560 4561 // Get the proxy token from the agent directly, since there is no API. 4562 proxy := a.State.Proxy("wrong-id-proxy") 4563 require.NotNil(proxy) 4564 token := proxy.ProxyToken 4565 require.NotEmpty(token) 4566 4567 req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?token="+token, nil) 4568 resp := httptest.NewRecorder() 4569 _, err := a.srv.AgentConnectCALeafCert(resp, req) 4570 require.Error(err) 4571 require.True(acl.IsErrPermissionDenied(err)) 4572 } 4573 4574 func TestAgentConnectCALeafCert_aclServiceWrite(t *testing.T) { 4575 t.Parallel() 4576 4577 require := require.New(t) 4578 a := NewTestAgent(t, t.Name(), TestACLConfig()+testAllowProxyConfig()) 4579 defer a.Shutdown() 4580 testrpc.WaitForLeader(t, a.RPC, "dc1") 4581 4582 // Register a service with a managed proxy 4583 { 4584 reg := &structs.ServiceDefinition{ 4585 ID: "test-id", 4586 Name: "test", 4587 Address: "127.0.0.1", 4588 Port: 8000, 4589 Check: structs.CheckType{ 4590 TTL: 15 * time.Second, 4591 }, 4592 Connect: &structs.ServiceConnect{ 4593 Proxy: &structs.ServiceDefinitionConnectProxy{}, 4594 }, 4595 } 4596 4597 req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) 4598 resp := httptest.NewRecorder() 4599 _, err := a.srv.AgentRegisterService(resp, req) 4600 require.NoError(err) 4601 require.Equal(200, resp.Code, "body: %s", resp.Body.String()) 4602 } 4603 4604 // Create an ACL with service:write for our service 4605 var token string 4606 { 4607 args := map[string]interface{}{ 4608 "Name": "User Token", 4609 "Type": "client", 4610 "Rules": `service "test" { policy = "write" }`, 4611 } 4612 req, _ 
:= http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args)) 4613 resp := httptest.NewRecorder() 4614 obj, err := a.srv.ACLCreate(resp, req) 4615 if err != nil { 4616 t.Fatalf("err: %v", err) 4617 } 4618 aclResp := obj.(aclCreateResponse) 4619 token = aclResp.ID 4620 } 4621 4622 req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?token="+token, nil) 4623 resp := httptest.NewRecorder() 4624 obj, err := a.srv.AgentConnectCALeafCert(resp, req) 4625 require.NoError(err) 4626 4627 // Get the issued cert 4628 _, ok := obj.(*structs.IssuedCert) 4629 require.True(ok) 4630 } 4631 4632 func TestAgentConnectCALeafCert_aclServiceReadDeny(t *testing.T) { 4633 t.Parallel() 4634 4635 require := require.New(t) 4636 a := NewTestAgent(t, t.Name(), TestACLConfig()+testAllowProxyConfig()) 4637 defer a.Shutdown() 4638 testrpc.WaitForLeader(t, a.RPC, "dc1") 4639 4640 // Register a service with a managed proxy 4641 { 4642 reg := &structs.ServiceDefinition{ 4643 ID: "test-id", 4644 Name: "test", 4645 Address: "127.0.0.1", 4646 Port: 8000, 4647 Check: structs.CheckType{ 4648 TTL: 15 * time.Second, 4649 }, 4650 Connect: &structs.ServiceConnect{ 4651 Proxy: &structs.ServiceDefinitionConnectProxy{}, 4652 }, 4653 } 4654 4655 req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) 4656 resp := httptest.NewRecorder() 4657 _, err := a.srv.AgentRegisterService(resp, req) 4658 require.NoError(err) 4659 require.Equal(200, resp.Code, "body: %s", resp.Body.String()) 4660 } 4661 4662 // Create an ACL with service:read for our service 4663 var token string 4664 { 4665 args := map[string]interface{}{ 4666 "Name": "User Token", 4667 "Type": "client", 4668 "Rules": `service "test" { policy = "read" }`, 4669 } 4670 req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args)) 4671 resp := httptest.NewRecorder() 4672 obj, err := a.srv.ACLCreate(resp, req) 4673 if err != nil { 4674 t.Fatalf("err: %v", err) 4675 } 4676 aclResp := 
obj.(aclCreateResponse) 4677 token = aclResp.ID 4678 } 4679 4680 req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?token="+token, nil) 4681 resp := httptest.NewRecorder() 4682 _, err := a.srv.AgentConnectCALeafCert(resp, req) 4683 require.Error(err) 4684 require.True(acl.IsErrPermissionDenied(err)) 4685 } 4686 4687 func TestAgentConnectCALeafCert_good(t *testing.T) { 4688 t.Parallel() 4689 4690 assert := assert.New(t) 4691 require := require.New(t) 4692 a := NewTestAgent(t, t.Name(), "") 4693 defer a.Shutdown() 4694 testrpc.WaitForTestAgent(t, a.RPC, "dc1") 4695 4696 // CA already setup by default by NewTestAgent but force a new one so we can 4697 // verify it was signed easily. 4698 ca1 := connect.TestCAConfigSet(t, a, nil) 4699 4700 { 4701 // Register a local service 4702 args := &structs.ServiceDefinition{ 4703 ID: "foo", 4704 Name: "test", 4705 Address: "127.0.0.1", 4706 Port: 8000, 4707 Check: structs.CheckType{ 4708 TTL: 15 * time.Second, 4709 }, 4710 } 4711 req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) 4712 resp := httptest.NewRecorder() 4713 _, err := a.srv.AgentRegisterService(resp, req) 4714 require.NoError(err) 4715 if !assert.Equal(200, resp.Code) { 4716 t.Log("Body: ", resp.Body.String()) 4717 } 4718 } 4719 4720 // List 4721 req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) 4722 resp := httptest.NewRecorder() 4723 obj, err := a.srv.AgentConnectCALeafCert(resp, req) 4724 require.NoError(err) 4725 require.Equal("MISS", resp.Header().Get("X-Cache")) 4726 4727 // Get the issued cert 4728 issued, ok := obj.(*structs.IssuedCert) 4729 assert.True(ok) 4730 4731 // Verify that the cert is signed by the CA 4732 requireLeafValidUnderCA(t, issued, ca1) 4733 4734 // Verify blocking index 4735 assert.True(issued.ModifyIndex > 0) 4736 assert.Equal(fmt.Sprintf("%d", issued.ModifyIndex), 4737 resp.Header().Get("X-Consul-Index")) 4738 4739 // Test caching 4740 { 4741 // Fetch it again 4742 resp := 
httptest.NewRecorder() 4743 obj2, err := a.srv.AgentConnectCALeafCert(resp, req) 4744 require.NoError(err) 4745 require.Equal(obj, obj2) 4746 4747 // Should cache hit this time and not make request 4748 require.Equal("HIT", resp.Header().Get("X-Cache")) 4749 } 4750 4751 // Test that caching is updated in the background 4752 { 4753 // Set a new CA 4754 ca := connect.TestCAConfigSet(t, a, nil) 4755 4756 retry.Run(t, func(r *retry.R) { 4757 resp := httptest.NewRecorder() 4758 // Try and sign again (note no index/wait arg since cache should update in 4759 // background even if we aren't actively blocking) 4760 obj, err := a.srv.AgentConnectCALeafCert(resp, req) 4761 r.Check(err) 4762 4763 issued2 := obj.(*structs.IssuedCert) 4764 if issued.CertPEM == issued2.CertPEM { 4765 r.Fatalf("leaf has not updated") 4766 } 4767 4768 // Got a new leaf. Sanity check it's a whole new key as well as different 4769 // cert. 4770 if issued.PrivateKeyPEM == issued2.PrivateKeyPEM { 4771 r.Fatalf("new leaf has same private key as before") 4772 } 4773 4774 // Verify that the cert is signed by the new CA 4775 requireLeafValidUnderCA(t, issued2, ca) 4776 4777 // Should be a cache hit! The data should've updated in the cache 4778 // in the background so this should've been fetched directly from 4779 // the cache. 4780 if resp.Header().Get("X-Cache") != "HIT" { 4781 r.Fatalf("should be a cache hit") 4782 } 4783 }) 4784 } 4785 } 4786 4787 // Test we can request a leaf cert for a service we have permission for 4788 // but is not local to this agent. 4789 func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) { 4790 t.Parallel() 4791 4792 assert := assert.New(t) 4793 require := require.New(t) 4794 a := NewTestAgent(t, t.Name(), "") 4795 defer a.Shutdown() 4796 testrpc.WaitForTestAgent(t, a.RPC, "dc1") 4797 4798 // CA already setup by default by NewTestAgent but force a new one so we can 4799 // verify it was signed easily. 
4800 ca1 := connect.TestCAConfigSet(t, a, nil) 4801 4802 { 4803 // Register a non-local service (central catalog) 4804 args := &structs.RegisterRequest{ 4805 Node: "foo", 4806 Address: "127.0.0.1", 4807 Service: &structs.NodeService{ 4808 Service: "test", 4809 Address: "127.0.0.1", 4810 Port: 8080, 4811 }, 4812 } 4813 req, _ := http.NewRequest("PUT", "/v1/catalog/register", jsonReader(args)) 4814 resp := httptest.NewRecorder() 4815 _, err := a.srv.CatalogRegister(resp, req) 4816 require.NoError(err) 4817 if !assert.Equal(200, resp.Code) { 4818 t.Log("Body: ", resp.Body.String()) 4819 } 4820 } 4821 4822 // List 4823 req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) 4824 resp := httptest.NewRecorder() 4825 obj, err := a.srv.AgentConnectCALeafCert(resp, req) 4826 require.NoError(err) 4827 require.Equal("MISS", resp.Header().Get("X-Cache")) 4828 4829 // Get the issued cert 4830 issued, ok := obj.(*structs.IssuedCert) 4831 assert.True(ok) 4832 4833 // Verify that the cert is signed by the CA 4834 requireLeafValidUnderCA(t, issued, ca1) 4835 4836 // Verify blocking index 4837 assert.True(issued.ModifyIndex > 0) 4838 assert.Equal(fmt.Sprintf("%d", issued.ModifyIndex), 4839 resp.Header().Get("X-Consul-Index")) 4840 4841 // Test caching 4842 { 4843 // Fetch it again 4844 resp := httptest.NewRecorder() 4845 obj2, err := a.srv.AgentConnectCALeafCert(resp, req) 4846 require.NoError(err) 4847 require.Equal(obj, obj2) 4848 4849 // Should cache hit this time and not make request 4850 require.Equal("HIT", resp.Header().Get("X-Cache")) 4851 } 4852 4853 // Test Blocking - see https://github.com/hashicorp/consul/issues/4462 4854 { 4855 // Fetch it again 4856 resp := httptest.NewRecorder() 4857 blockingReq, _ := http.NewRequest("GET", fmt.Sprintf("/v1/agent/connect/ca/leaf/test?wait=125ms&index=%d", issued.ModifyIndex), nil) 4858 doneCh := make(chan struct{}) 4859 go func() { 4860 a.srv.AgentConnectCALeafCert(resp, blockingReq) 4861 close(doneCh) 4862 }() 4863 
4864 select { 4865 case <-time.After(500 * time.Millisecond): 4866 require.FailNow("Shouldn't block for this long - not respecting wait parameter in the query") 4867 4868 case <-doneCh: 4869 } 4870 } 4871 4872 // Test that caching is updated in the background 4873 { 4874 // Set a new CA 4875 ca := connect.TestCAConfigSet(t, a, nil) 4876 4877 retry.Run(t, func(r *retry.R) { 4878 resp := httptest.NewRecorder() 4879 // Try and sign again (note no index/wait arg since cache should update in 4880 // background even if we aren't actively blocking) 4881 obj, err := a.srv.AgentConnectCALeafCert(resp, req) 4882 r.Check(err) 4883 4884 issued2 := obj.(*structs.IssuedCert) 4885 if issued.CertPEM == issued2.CertPEM { 4886 r.Fatalf("leaf has not updated") 4887 } 4888 4889 // Got a new leaf. Sanity check it's a whole new key as well as different 4890 // cert. 4891 if issued.PrivateKeyPEM == issued2.PrivateKeyPEM { 4892 r.Fatalf("new leaf has same private key as before") 4893 } 4894 4895 // Verify that the cert is signed by the new CA 4896 requireLeafValidUnderCA(t, issued2, ca) 4897 4898 // Should be a cache hit! The data should've updated in the cache 4899 // in the background so this should've been fetched directly from 4900 // the cache. 4901 if resp.Header().Get("X-Cache") != "HIT" { 4902 r.Fatalf("should be a cache hit") 4903 } 4904 }) 4905 } 4906 } 4907 4908 func requireLeafValidUnderCA(t *testing.T, issued *structs.IssuedCert, 4909 ca *structs.CARoot) { 4910 4911 roots := x509.NewCertPool() 4912 require.True(t, roots.AppendCertsFromPEM([]byte(ca.RootCert))) 4913 leaf, err := connect.ParseCert(issued.CertPEM) 4914 require.NoError(t, err) 4915 _, err = leaf.Verify(x509.VerifyOptions{ 4916 Roots: roots, 4917 }) 4918 require.NoError(t, err) 4919 4920 // Verify the private key matches. tls.LoadX509Keypair does this for us! 
4921 _, err = tls.X509KeyPair([]byte(issued.CertPEM), []byte(issued.PrivateKeyPEM)) 4922 require.NoError(t, err) 4923 } 4924 4925 func TestAgentConnectProxyConfig_Blocking(t *testing.T) { 4926 t.Parallel() 4927 4928 a := NewTestAgent(t, t.Name(), testAllowProxyConfig()) 4929 defer a.Shutdown() 4930 testrpc.WaitForTestAgent(t, a.RPC, "dc1") 4931 4932 // Define a local service with a managed proxy. It's registered in the test 4933 // loop to make sure agent state is predictable whatever order tests execute 4934 // since some alter this service config. 4935 reg := &structs.ServiceDefinition{ 4936 Name: "test", 4937 Address: "127.0.0.1", 4938 Port: 8000, 4939 Check: structs.CheckType{ 4940 TTL: 15 * time.Second, 4941 }, 4942 Connect: &structs.ServiceConnect{ 4943 Proxy: &structs.ServiceDefinitionConnectProxy{ 4944 Command: []string{"tubes.sh"}, 4945 Config: map[string]interface{}{ 4946 "bind_port": 1234, 4947 "connect_timeout_ms": 500, 4948 // Specify upstreams in deprecated nested config way here. We test the 4949 // new way in the update case below. 
4950 "upstreams": []map[string]interface{}{ 4951 { 4952 "destination_name": "db", 4953 "local_bind_port": 3131, 4954 }, 4955 }, 4956 }, 4957 }, 4958 }, 4959 } 4960 4961 expectedResponse := &api.ConnectProxyConfig{ 4962 ProxyServiceID: "test-proxy", 4963 TargetServiceID: "test", 4964 TargetServiceName: "test", 4965 ContentHash: "a7c93585b6d70445", 4966 ExecMode: "daemon", 4967 Command: []string{"tubes.sh"}, 4968 Config: map[string]interface{}{ 4969 "bind_address": "127.0.0.1", 4970 "local_service_address": "127.0.0.1:8000", 4971 "bind_port": int(1234), 4972 "connect_timeout_ms": float64(500), 4973 }, 4974 Upstreams: []api.Upstream{ 4975 { 4976 DestinationType: "service", 4977 DestinationName: "db", 4978 LocalBindPort: 3131, 4979 }, 4980 }, 4981 } 4982 4983 ur, err := copystructure.Copy(expectedResponse) 4984 require.NoError(t, err) 4985 updatedResponse := ur.(*api.ConnectProxyConfig) 4986 updatedResponse.ContentHash = "aedc0ca0f3f7794e" 4987 updatedResponse.Upstreams = append(updatedResponse.Upstreams, api.Upstream{ 4988 DestinationType: "service", 4989 DestinationName: "cache", 4990 LocalBindPort: 4242, 4991 Config: map[string]interface{}{ 4992 "connect_timeout_ms": float64(1000), 4993 }, 4994 }) 4995 4996 tests := []struct { 4997 name string 4998 url string 4999 updateFunc func() 5000 wantWait time.Duration 5001 wantCode int 5002 wantErr bool 5003 wantResp *api.ConnectProxyConfig 5004 }{ 5005 { 5006 name: "simple fetch", 5007 url: "/v1/agent/connect/proxy/test-proxy", 5008 wantCode: 200, 5009 wantErr: false, 5010 wantResp: expectedResponse, 5011 }, 5012 { 5013 name: "blocking fetch timeout, no change", 5014 url: "/v1/agent/connect/proxy/test-proxy?hash=" + expectedResponse.ContentHash + "&wait=100ms", 5015 wantWait: 100 * time.Millisecond, 5016 wantCode: 200, 5017 wantErr: false, 5018 wantResp: expectedResponse, 5019 }, 5020 { 5021 name: "blocking fetch old hash should return immediately", 5022 url: "/v1/agent/connect/proxy/test-proxy?hash=123456789abcd&wait=10m", 
5023 wantCode: 200, 5024 wantErr: false, 5025 wantResp: expectedResponse, 5026 }, 5027 { 5028 name: "blocking fetch returns change", 5029 url: "/v1/agent/connect/proxy/test-proxy?hash=" + expectedResponse.ContentHash, 5030 updateFunc: func() { 5031 time.Sleep(100 * time.Millisecond) 5032 // Re-register with new proxy config 5033 r2, err := copystructure.Copy(reg) 5034 require.NoError(t, err) 5035 reg2 := r2.(*structs.ServiceDefinition) 5036 reg2.Connect.Proxy.Upstreams = structs.UpstreamsFromAPI(updatedResponse.Upstreams) 5037 req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(r2)) 5038 resp := httptest.NewRecorder() 5039 _, err = a.srv.AgentRegisterService(resp, req) 5040 require.NoError(t, err) 5041 require.Equal(t, 200, resp.Code, "body: %s", resp.Body.String()) 5042 }, 5043 wantWait: 100 * time.Millisecond, 5044 wantCode: 200, 5045 wantErr: false, 5046 wantResp: updatedResponse, 5047 }, 5048 { 5049 // This test exercises a case that caused a busy loop to eat CPU for the 5050 // entire duration of the blocking query. If a service gets re-registered 5051 // wth same proxy config then the old proxy config chan is closed causing 5052 // blocked watchset.Watch to return false indicating a change. But since 5053 // the hash is the same when the blocking fn is re-called we should just 5054 // keep blocking on the next iteration. The bug hit was that the WatchSet 5055 // ws was not being reset in the loop and so when you try to `Watch` it 5056 // the second time it just returns immediately making the blocking loop 5057 // into a busy-poll! 5058 // 5059 // This test though doesn't catch that because busy poll still has the 5060 // correct external behavior. I don't want to instrument the loop to 5061 // assert it's not executing too fast here as I can't think of a clean way 5062 // and the issue is fixed now so this test doesn't actually catch the 5063 // error, but does provide an easy way to verify the behavior by hand: 5064 // 1. 
Make this test fail e.g. change wantErr to true 5065 // 2. Add a log.Println or similar into the blocking loop/function 5066 // 3. See whether it's called just once or many times in a tight loop. 5067 name: "blocking fetch interrupted with no change (same hash)", 5068 url: "/v1/agent/connect/proxy/test-proxy?wait=200ms&hash=" + expectedResponse.ContentHash, 5069 updateFunc: func() { 5070 time.Sleep(100 * time.Millisecond) 5071 // Re-register with _same_ proxy config 5072 req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(reg)) 5073 resp := httptest.NewRecorder() 5074 _, err = a.srv.AgentRegisterService(resp, req) 5075 require.NoError(t, err) 5076 require.Equal(t, 200, resp.Code, "body: %s", resp.Body.String()) 5077 }, 5078 wantWait: 200 * time.Millisecond, 5079 wantCode: 200, 5080 wantErr: false, 5081 wantResp: expectedResponse, 5082 }, 5083 } 5084 5085 for _, tt := range tests { 5086 t.Run(tt.name, func(t *testing.T) { 5087 assert := assert.New(t) 5088 require := require.New(t) 5089 5090 // Register the basic service to ensure it's in a known state to start. 
5091 { 5092 req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(reg)) 5093 resp := httptest.NewRecorder() 5094 _, err := a.srv.AgentRegisterService(resp, req) 5095 require.NoError(err) 5096 require.Equal(200, resp.Code, "body: %s", resp.Body.String()) 5097 } 5098 5099 req, _ := http.NewRequest("GET", tt.url, nil) 5100 resp := httptest.NewRecorder() 5101 if tt.updateFunc != nil { 5102 go tt.updateFunc() 5103 } 5104 start := time.Now() 5105 obj, err := a.srv.AgentConnectProxyConfig(resp, req) 5106 elapsed := time.Now().Sub(start) 5107 5108 if tt.wantErr { 5109 require.Error(err) 5110 } else { 5111 require.NoError(err) 5112 } 5113 if tt.wantCode != 0 { 5114 require.Equal(tt.wantCode, resp.Code, "body: %s", resp.Body.String()) 5115 } 5116 if tt.wantWait != 0 { 5117 assert.True(elapsed >= tt.wantWait, "should have waited at least %s, "+ 5118 "took %s", tt.wantWait, elapsed) 5119 } else { 5120 assert.True(elapsed < 10*time.Millisecond, "should not have waited, "+ 5121 "took %s", elapsed) 5122 } 5123 5124 assert.Equal(tt.wantResp, obj) 5125 5126 assert.Equal(tt.wantResp.ContentHash, resp.Header().Get("X-Consul-ContentHash")) 5127 }) 5128 } 5129 } 5130 5131 func TestAgentConnectProxyConfig_aclDefaultDeny(t *testing.T) { 5132 t.Parallel() 5133 5134 require := require.New(t) 5135 a := NewTestAgent(t, t.Name(), TestACLConfig()+testAllowProxyConfig()) 5136 defer a.Shutdown() 5137 testrpc.WaitForLeader(t, a.RPC, "dc1") 5138 5139 // Register a service with a managed proxy 5140 { 5141 reg := &structs.ServiceDefinition{ 5142 ID: "test-id", 5143 Name: "test", 5144 Address: "127.0.0.1", 5145 Port: 8000, 5146 Check: structs.CheckType{ 5147 TTL: 15 * time.Second, 5148 }, 5149 Connect: &structs.ServiceConnect{ 5150 Proxy: &structs.ServiceDefinitionConnectProxy{}, 5151 }, 5152 } 5153 5154 req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) 5155 resp := httptest.NewRecorder() 5156 _, err := a.srv.AgentRegisterService(resp, req) 
5157 require.NoError(err) 5158 require.Equal(200, resp.Code, "body: %s", resp.Body.String()) 5159 } 5160 5161 req, _ := http.NewRequest("GET", "/v1/agent/connect/proxy/test-id-proxy", nil) 5162 resp := httptest.NewRecorder() 5163 _, err := a.srv.AgentConnectProxyConfig(resp, req) 5164 require.True(acl.IsErrPermissionDenied(err)) 5165 } 5166 5167 func TestAgentConnectProxyConfig_aclProxyToken(t *testing.T) { 5168 t.Parallel() 5169 5170 require := require.New(t) 5171 a := NewTestAgent(t, t.Name(), TestACLConfig()+testAllowProxyConfig()) 5172 defer a.Shutdown() 5173 testrpc.WaitForLeader(t, a.RPC, "dc1") 5174 5175 // Register a service with a managed proxy 5176 { 5177 reg := &structs.ServiceDefinition{ 5178 ID: "test-id", 5179 Name: "test", 5180 Address: "127.0.0.1", 5181 Port: 8000, 5182 Check: structs.CheckType{ 5183 TTL: 15 * time.Second, 5184 }, 5185 Connect: &structs.ServiceConnect{ 5186 Proxy: &structs.ServiceDefinitionConnectProxy{}, 5187 }, 5188 } 5189 5190 req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) 5191 resp := httptest.NewRecorder() 5192 _, err := a.srv.AgentRegisterService(resp, req) 5193 require.NoError(err) 5194 require.Equal(200, resp.Code, "body: %s", resp.Body.String()) 5195 } 5196 5197 // Get the proxy token from the agent directly, since there is no API 5198 // to expose this. 
5199 proxy := a.State.Proxy("test-id-proxy") 5200 require.NotNil(proxy) 5201 token := proxy.ProxyToken 5202 require.NotEmpty(token) 5203 5204 req, _ := http.NewRequest( 5205 "GET", "/v1/agent/connect/proxy/test-id-proxy?token="+token, nil) 5206 resp := httptest.NewRecorder() 5207 obj, err := a.srv.AgentConnectProxyConfig(resp, req) 5208 require.NoError(err) 5209 proxyCfg := obj.(*api.ConnectProxyConfig) 5210 require.Equal("test-id-proxy", proxyCfg.ProxyServiceID) 5211 require.Equal("test-id", proxyCfg.TargetServiceID) 5212 require.Equal("test", proxyCfg.TargetServiceName) 5213 } 5214 5215 func TestAgentConnectProxyConfig_aclServiceWrite(t *testing.T) { 5216 t.Parallel() 5217 5218 require := require.New(t) 5219 a := NewTestAgent(t, t.Name(), TestACLConfig()+testAllowProxyConfig()) 5220 defer a.Shutdown() 5221 testrpc.WaitForLeader(t, a.RPC, "dc1") 5222 5223 // Register a service with a managed proxy 5224 { 5225 reg := &structs.ServiceDefinition{ 5226 ID: "test-id", 5227 Name: "test", 5228 Address: "127.0.0.1", 5229 Port: 8000, 5230 Check: structs.CheckType{ 5231 TTL: 15 * time.Second, 5232 }, 5233 Connect: &structs.ServiceConnect{ 5234 Proxy: &structs.ServiceDefinitionConnectProxy{}, 5235 }, 5236 } 5237 5238 req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) 5239 resp := httptest.NewRecorder() 5240 _, err := a.srv.AgentRegisterService(resp, req) 5241 require.NoError(err) 5242 require.Equal(200, resp.Code, "body: %s", resp.Body.String()) 5243 } 5244 5245 // Create an ACL with service:write for our service 5246 var token string 5247 { 5248 args := map[string]interface{}{ 5249 "Name": "User Token", 5250 "Type": "client", 5251 "Rules": `service "test" { policy = "write" }`, 5252 } 5253 req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args)) 5254 resp := httptest.NewRecorder() 5255 obj, err := a.srv.ACLCreate(resp, req) 5256 if err != nil { 5257 t.Fatalf("err: %v", err) 5258 } 5259 aclResp := 
obj.(aclCreateResponse)
		token = aclResp.ID
	}

	req, _ := http.NewRequest(
		"GET", "/v1/agent/connect/proxy/test-id-proxy?token="+token, nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.AgentConnectProxyConfig(resp, req)
	require.NoError(err)
	proxyCfg := obj.(*api.ConnectProxyConfig)
	require.Equal("test-id-proxy", proxyCfg.ProxyServiceID)
	require.Equal("test-id", proxyCfg.TargetServiceID)
	require.Equal("test", proxyCfg.TargetServiceName)
}

// Test that fetching a managed proxy's config is denied when the token only
// grants service:read on the target service (service:write is required).
func TestAgentConnectProxyConfig_aclServiceReadDeny(t *testing.T) {
	t.Parallel()

	require := require.New(t)
	a := NewTestAgent(t, t.Name(), TestACLConfig()+testAllowProxyConfig())
	defer a.Shutdown()

	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Register a service with a managed proxy
	{
		reg := &structs.ServiceDefinition{
			ID:      "test-id",
			Name:    "test",
			Address: "127.0.0.1",
			Port:    8000,
			Check: structs.CheckType{
				TTL: 15 * time.Second,
			},
			Connect: &structs.ServiceConnect{
				Proxy: &structs.ServiceDefinitionConnectProxy{},
			},
		}

		req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg))
		resp := httptest.NewRecorder()
		_, err := a.srv.AgentRegisterService(resp, req)
		require.NoError(err)
		require.Equal(200, resp.Code, "body: %s", resp.Body.String())
	}

	// Create an ACL with service:read for our service
	var token string
	{
		args := map[string]interface{}{
			"Name":  "User Token",
			"Type":  "client",
			"Rules": `service "test" { policy = "read" }`,
		}
		req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args))
		resp := httptest.NewRecorder()
		obj, err := a.srv.ACLCreate(resp, req)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		aclResp := obj.(aclCreateResponse)
		token = aclResp.ID
	}

	req, _ := http.NewRequest(
		"GET", "/v1/agent/connect/proxy/test-id-proxy?token="+token, nil)
	resp := httptest.NewRecorder()
	_, err := a.srv.AgentConnectProxyConfig(resp, req)
	// read-only access on the service must not be enough.
	require.True(acl.IsErrPermissionDenied(err))
}

// makeTelemetryDefaults returns the telemetry config the agent injects into a
// managed proxy's config by default for the given target service ID.
func makeTelemetryDefaults(targetID string) lib.TelemetryConfig {
	return lib.TelemetryConfig{
		FilterDefault: true,
		MetricsPrefix: "consul.proxy." + targetID,
	}
}

// Table test covering how global proxy defaults and per-registration proxy
// config are merged into the final config served from
// /v1/agent/connect/proxy/:id.
func TestAgentConnectProxyConfig_ConfigHandling(t *testing.T) {
	t.Parallel()

	// Get the default command to compare below
	defaultCommand, err := defaultProxyCommand(nil)
	require.NoError(t, err)

	// Define a local service with a managed proxy. It's registered in the test
	// loop to make sure agent state is predictable whatever order tests execute
	// since some alter this service config.
	reg := &structs.ServiceDefinition{
		ID:      "test-id",
		Name:    "test",
		Address: "127.0.0.1",
		Port:    8000,
		Check: structs.CheckType{
			TTL: 15 * time.Second,
		},
		Connect: &structs.ServiceConnect{
			// Proxy is populated with the definition in the table below.
		},
	}

	tests := []struct {
		name         string
		globalConfig string
		proxy        structs.ServiceDefinitionConnectProxy
		useToken     string
		wantMode     api.ProxyExecMode
		wantCommand  []string
		wantConfig   map[string]interface{}
	}{
		{
			name: "defaults",
			globalConfig: `
			bind_addr = "0.0.0.0"
			connect {
				enabled = true
				proxy {
					allow_managed_api_registration = true
				}
			}
			ports {
				proxy_min_port = 10000
				proxy_max_port = 10000
			}
			`,
			proxy:       structs.ServiceDefinitionConnectProxy{},
			wantMode:    api.ProxyExecModeDaemon,
			wantCommand: defaultCommand,
			wantConfig: map[string]interface{}{
				"bind_address":          "0.0.0.0",
				"bind_port":             10000,            // "randomly" chosen from our range of 1
				"local_service_address": "127.0.0.1:8000", // port from service reg
				"telemetry":             makeTelemetryDefaults(reg.ID),
			},
		},
		{
			name: "global defaults - script",
			globalConfig: `
			bind_addr = "0.0.0.0"
			connect {
				enabled = true
				proxy {
					allow_managed_api_registration = true
				}
				proxy_defaults = {
					exec_mode = "script"
					script_command = ["script.sh"]
				}
			}
			ports {
				proxy_min_port = 10000
				proxy_max_port = 10000
			}
			`,
			proxy:       structs.ServiceDefinitionConnectProxy{},
			wantMode:    api.ProxyExecModeScript,
			wantCommand: []string{"script.sh"},
			wantConfig: map[string]interface{}{
				"bind_address":          "0.0.0.0",
				"bind_port":             10000,            // "randomly" chosen from our range of 1
				"local_service_address": "127.0.0.1:8000", // port from service reg
				"telemetry":             makeTelemetryDefaults(reg.ID),
			},
		},
		{
			name: "global defaults - daemon",
			globalConfig: `
			bind_addr = "0.0.0.0"
			connect {
				enabled = true
				proxy {
					allow_managed_api_registration = true
				}
				proxy_defaults = {
					exec_mode = "daemon"
					daemon_command = ["daemon.sh"]
				}
			}
			ports {
				proxy_min_port = 10000
				proxy_max_port = 10000
			}
			`,
			proxy:       structs.ServiceDefinitionConnectProxy{},
			wantMode:    api.ProxyExecModeDaemon,
			wantCommand: []string{"daemon.sh"},
			wantConfig: map[string]interface{}{
				"bind_address":          "0.0.0.0",
				"bind_port":             10000,            // "randomly" chosen from our range of 1
				"local_service_address": "127.0.0.1:8000", // port from service reg
				"telemetry":             makeTelemetryDefaults(reg.ID),
			},
		},
		{
			name: "global default config merge",
			globalConfig: `
			bind_addr = "0.0.0.0"
			connect {
				enabled = true
				proxy {
					allow_managed_api_registration = true
				}
				proxy_defaults = {
					config = {
						connect_timeout_ms = 1000
					}
				}
			}
			ports {
				proxy_min_port = 10000
				proxy_max_port = 10000
			}
			telemetry {
				statsite_address = "localhost:8989"
			}
			`,
			proxy: structs.ServiceDefinitionConnectProxy{
				Config: map[string]interface{}{
					"foo": "bar",
				},
			},
			wantMode:    api.ProxyExecModeDaemon,
			wantCommand: defaultCommand,
			wantConfig: map[string]interface{}{
				"bind_address":          "0.0.0.0",
				"bind_port":             10000,            // "randomly" chosen from our range of 1
				"local_service_address": "127.0.0.1:8000", // port from service reg
				"connect_timeout_ms":    1000,
				"foo":                   "bar",
				"telemetry": lib.TelemetryConfig{
					FilterDefault: true,
					MetricsPrefix: "consul.proxy." + reg.ID,
					StatsiteAddr:  "localhost:8989",
				},
			},
		},
		{
			name: "overrides in reg",
			globalConfig: `
			bind_addr = "0.0.0.0"
			connect {
				enabled = true
				proxy {
					allow_managed_api_registration = true
				}
				proxy_defaults = {
					exec_mode = "daemon"
					daemon_command = ["daemon.sh"]
					script_command = ["script.sh"]
					config = {
						connect_timeout_ms = 1000
					}
				}
			}
			ports {
				proxy_min_port = 10000
				proxy_max_port = 10000
			}
			telemetry {
				statsite_address = "localhost:8989"
			}
			`,
			proxy: structs.ServiceDefinitionConnectProxy{
				ExecMode: "script",
				Command:  []string{"foo.sh"},
				Config: map[string]interface{}{
					"connect_timeout_ms":    2000,
					"bind_address":          "127.0.0.1",
					"bind_port":             1024,
					"local_service_address": "127.0.0.1:9191",
					"telemetry": map[string]interface{}{
						"statsite_address": "stats.it:10101",
						"metrics_prefix":   "foo", // important! checks that our prefix logic respects user customization
					},
				},
			},
			wantMode:    api.ProxyExecModeScript,
			wantCommand: []string{"foo.sh"},
			wantConfig: map[string]interface{}{
				"bind_address":          "127.0.0.1",
				"bind_port":             int(1024),
				"local_service_address": "127.0.0.1:9191",
				"connect_timeout_ms":    float64(2000),
				"telemetry": lib.TelemetryConfig{
					FilterDefault: true,
					MetricsPrefix: "foo",
					StatsiteAddr:  "stats.it:10101",
				},
			},
		},
		{
			name: "reg telemetry not compatible, preserved with no merge",
			globalConfig: `
			connect {
				enabled = true
				proxy {
					allow_managed_api_registration = true
				}
			}
			ports {
				proxy_min_port = 10000
				proxy_max_port = 10000
			}
			telemetry {
				statsite_address = "localhost:8989"
			}
			`,
			proxy: structs.ServiceDefinitionConnectProxy{
				ExecMode: "script",
				Command:  []string{"foo.sh"},
				Config: map[string]interface{}{
					"telemetry": map[string]interface{}{
						"foo": "bar",
					},
				},
			},
			wantMode:    api.ProxyExecModeScript,
			wantCommand: []string{"foo.sh"},
			wantConfig: map[string]interface{}{
				"bind_address":          "127.0.0.1",
				"bind_port":             10000,            // "randomly" chosen from our range of 1
				"local_service_address": "127.0.0.1:8000", // port from service reg
				"telemetry": map[string]interface{}{
					"foo": "bar",
				},
			},
		},
		{
			name:     "reg passed through with no agent config added if not proxy token auth",
			useToken: "foo", // no actual ACLs set so this any token will work but has to be non-empty to be used below
			globalConfig: `
			bind_addr = "0.0.0.0"
			connect {
				enabled = true
				proxy {
					allow_managed_api_registration = true
				}
				proxy_defaults = {
					exec_mode = "daemon"
					daemon_command = ["daemon.sh"]
					script_command = ["script.sh"]
					config = {
						connect_timeout_ms = 1000
					}
				}
			}
			ports {
				proxy_min_port = 10000
				proxy_max_port = 10000
			}
			telemetry {
				statsite_address = "localhost:8989"
			}
			`,
			proxy: structs.ServiceDefinitionConnectProxy{
				ExecMode: "script",
				Command:  []string{"foo.sh"},
				Config: map[string]interface{}{
					"connect_timeout_ms":    2000,
					"bind_address":          "127.0.0.1",
					"bind_port":             1024,
					"local_service_address": "127.0.0.1:9191",
					"telemetry": map[string]interface{}{
						"metrics_prefix": "foo",
					},
				},
			},
			wantMode:    api.ProxyExecModeScript,
			wantCommand: []string{"foo.sh"},
			wantConfig: map[string]interface{}{
				"bind_address":          "127.0.0.1",
				"bind_port":             int(1024),
				"local_service_address": "127.0.0.1:9191",
				"connect_timeout_ms":    float64(2000),
				"telemetry": map[string]interface{}{ // No defaults merged
					"metrics_prefix": "foo",
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			a := NewTestAgent(t, t.Name(), tt.globalConfig)
			defer a.Shutdown()
			testrpc.WaitForTestAgent(t, a.RPC, "dc1")

			// Register the basic service with the required config
			{
				reg.Connect.Proxy = &tt.proxy
				req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(reg))
				resp := httptest.NewRecorder()
				_, err := a.srv.AgentRegisterService(resp, req)
				require.NoError(err)
				require.Equal(200, resp.Code, "body: %s", resp.Body.String())
			}

			proxy := a.State.Proxy("test-id-proxy")
			require.NotNil(proxy)
			require.NotEmpty(proxy.ProxyToken)

			req, _ := http.NewRequest("GET", "/v1/agent/connect/proxy/test-id-proxy", nil)
			if tt.useToken != "" {
				req.Header.Set("X-Consul-Token", tt.useToken)
			} else {
				req.Header.Set("X-Consul-Token", proxy.ProxyToken)
			}
			resp := httptest.NewRecorder()
			obj, err := a.srv.AgentConnectProxyConfig(resp, req)
			require.NoError(err)

			proxyCfg := obj.(*api.ConnectProxyConfig)
			assert.Equal("test-id-proxy", proxyCfg.ProxyServiceID)
			assert.Equal("test-id", proxyCfg.TargetServiceID)
			assert.Equal("test", proxyCfg.TargetServiceName)
			assert.Equal(tt.wantMode, proxyCfg.ExecMode)
			assert.Equal(tt.wantCommand, proxyCfg.Command)
			require.Equal(tt.wantConfig, proxyCfg.Config)
		})
	}
}

func TestAgentConnectAuthorize_badBody(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
	args := []string{}
	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
	resp := httptest.NewRecorder()
	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
	require.Error(err)
	assert.Nil(respRaw)
	// Note that BadRequestError is handled outside the endpoint handler so we
	// still see a 200 if we check here.
	assert.Contains(err.Error(), "decode failed")
}

func TestAgentConnectAuthorize_noTarget(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
	args := &structs.ConnectAuthorizeRequest{}
	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
	resp := httptest.NewRecorder()
	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
	require.Error(err)
	assert.Nil(respRaw)
	// Note that BadRequestError is handled outside the endpoint handler so we
	// still see a 200 if we check here.
	assert.Contains(err.Error(), "Target service must be specified")
}

// Client ID is not in the valid URI format
func TestAgentConnectAuthorize_idInvalidFormat(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
	args := &structs.ConnectAuthorizeRequest{
		Target:        "web",
		ClientCertURI: "tubes",
	}
	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
	resp := httptest.NewRecorder()
	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
	require.Error(err)
	assert.Nil(respRaw)
	// Note that BadRequestError is handled outside the endpoint handler so we
	// still see a 200 if we check here.
	assert.Contains(err.Error(), "ClientCertURI not a valid Connect identifier")
}

// Client ID is a valid URI but its not a service URI
func TestAgentConnectAuthorize_idNotService(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
	args := &structs.ConnectAuthorizeRequest{
		Target:        "web",
		ClientCertURI: "spiffe://1234.consul",
	}
	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
	resp := httptest.NewRecorder()
	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
	require.Error(err)
	assert.Nil(respRaw)
	// Note that BadRequestError is handled outside the endpoint handler so we
	// still see a 200 if we check here.
	assert.Contains(err.Error(), "ClientCertURI not a valid Service identifier")
}

// Test when there is an intention allowing the connection
func TestAgentConnectAuthorize_allow(t *testing.T) {
	t.Parallel()

	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
	target := "db"

	// Create some intentions
	var ixnID string
	{
		req := structs.IntentionRequest{
			Datacenter: "dc1",
			Op:         structs.IntentionOpCreate,
			Intention:  structs.TestIntention(t),
		}
		req.Intention.SourceNS = structs.IntentionDefaultNamespace
		req.Intention.SourceName = "web"
		req.Intention.DestinationNS = structs.IntentionDefaultNamespace
		req.Intention.DestinationName = target
		req.Intention.Action = structs.IntentionActionAllow

		require.NoError(a.RPC("Intention.Apply", &req, &ixnID))
	}

	args := &structs.ConnectAuthorizeRequest{
		Target:        target,
		ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
	}
	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
	resp := httptest.NewRecorder()
	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
	require.NoError(err)
	require.Equal(200, resp.Code)
	require.Equal("MISS", resp.Header().Get("X-Cache"))

	obj := respRaw.(*connectAuthorizeResp)
	require.True(obj.Authorized)
	require.Contains(obj.Reason, "Matched")

	// Make the request again
	{
		req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
		resp := httptest.NewRecorder()
		respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
		require.NoError(err)
		require.Equal(200, resp.Code)

		obj := respRaw.(*connectAuthorizeResp)
		require.True(obj.Authorized)
		require.Contains(obj.Reason, "Matched")

		// That should've been a cache hit.
		require.Equal("HIT", resp.Header().Get("X-Cache"))
	}

	// Change the intention
	{
		req := structs.IntentionRequest{
			Datacenter: "dc1",
			Op:         structs.IntentionOpUpdate,
			Intention:  structs.TestIntention(t),
		}
		req.Intention.ID = ixnID
		req.Intention.SourceNS = structs.IntentionDefaultNamespace
		req.Intention.SourceName = "web"
		req.Intention.DestinationNS = structs.IntentionDefaultNamespace
		req.Intention.DestinationName = target
		req.Intention.Action = structs.IntentionActionDeny

		require.NoError(a.RPC("Intention.Apply", &req, &ixnID))
	}

	// Short sleep lets the cache background refresh happen
	time.Sleep(100 * time.Millisecond)

	// Make the request again
	{
		req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
		resp := httptest.NewRecorder()
		respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
		require.NoError(err)
		require.Equal(200, resp.Code)

		obj := respRaw.(*connectAuthorizeResp)
		require.False(obj.Authorized)
		require.Contains(obj.Reason, "Matched")

		// That should've been a cache hit, too, since it updated in the
		// background.
		require.Equal("HIT", resp.Header().Get("X-Cache"))
	}
}

// Test when there is an intention denying the connection
func TestAgentConnectAuthorize_deny(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
	target := "db"

	// Create some intentions
	{
		req := structs.IntentionRequest{
			Datacenter: "dc1",
			Op:         structs.IntentionOpCreate,
			Intention:  structs.TestIntention(t),
		}
		req.Intention.SourceNS = structs.IntentionDefaultNamespace
		req.Intention.SourceName = "web"
		req.Intention.DestinationNS = structs.IntentionDefaultNamespace
		req.Intention.DestinationName = target
		req.Intention.Action = structs.IntentionActionDeny

		var reply string
		assert.NoError(a.RPC("Intention.Apply", &req, &reply))
	}

	args := &structs.ConnectAuthorizeRequest{
		Target:        target,
		ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
	}
	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
	resp := httptest.NewRecorder()
	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
	assert.NoError(err)
	assert.Equal(200, resp.Code)

	obj := respRaw.(*connectAuthorizeResp)
	assert.False(obj.Authorized)
	assert.Contains(obj.Reason, "Matched")
}

// Test when there is an intention allowing service with a different trust
// domain. We allow this because migration between trust domains shouldn't cause
// an outage even if we have stale info about current trusted domains. It's safe
// because the CA root is either unique to this cluster and not used to sign
// anything external, or path validation can be used to ensure that the CA can
// only issue certs that are valid for the specific cluster trust domain at x509
// level which is enforced by TLS handshake.
func TestAgentConnectAuthorize_allowTrustDomain(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
	target := "db"

	// Create some intentions
	{
		req := structs.IntentionRequest{
			Datacenter: "dc1",
			Op:         structs.IntentionOpCreate,
			Intention:  structs.TestIntention(t),
		}
		req.Intention.SourceNS = structs.IntentionDefaultNamespace
		req.Intention.SourceName = "web"
		req.Intention.DestinationNS = structs.IntentionDefaultNamespace
		req.Intention.DestinationName = target
		req.Intention.Action = structs.IntentionActionAllow

		var reply string
		require.NoError(a.RPC("Intention.Apply", &req, &reply))
	}

	{
		args := &structs.ConnectAuthorizeRequest{
			Target:        target,
			ClientCertURI: "spiffe://fake-domain.consul/ns/default/dc/dc1/svc/web",
		}
		req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
		resp := httptest.NewRecorder()
		respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
		require.NoError(err)
		assert.Equal(200, resp.Code)

		obj := respRaw.(*connectAuthorizeResp)
		require.True(obj.Authorized)
		require.Contains(obj.Reason, "Matched")
	}
}

// Test that a specific allow intention beats a wildcard deny for the same
// destination, and the wildcard deny still applies to other sources.
func TestAgentConnectAuthorize_denyWildcard(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	require := require.New(t)
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	target := "db"

	// Create some intentions
	{
		// Deny wildcard to DB
		req := structs.IntentionRequest{
			Datacenter: "dc1",
			Op:         structs.IntentionOpCreate,
			Intention:  structs.TestIntention(t),
		}
		req.Intention.SourceNS = structs.IntentionDefaultNamespace
		req.Intention.SourceName = "*"
		req.Intention.DestinationNS = structs.IntentionDefaultNamespace
		req.Intention.DestinationName = target
		req.Intention.Action = structs.IntentionActionDeny

		var reply string
		require.NoError(a.RPC("Intention.Apply", &req, &reply))
	}
	{
		// Allow web to DB
		req := structs.IntentionRequest{
			Datacenter: "dc1",
			Op:         structs.IntentionOpCreate,
			Intention:  structs.TestIntention(t),
		}
		req.Intention.SourceNS = structs.IntentionDefaultNamespace
		req.Intention.SourceName = "web"
		req.Intention.DestinationNS = structs.IntentionDefaultNamespace
		req.Intention.DestinationName = target
		req.Intention.Action = structs.IntentionActionAllow

		var reply string
		assert.NoError(a.RPC("Intention.Apply", &req, &reply))
	}

	// Web should be allowed
	{
		args := &structs.ConnectAuthorizeRequest{
			Target:        target,
			ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
		}
		req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
		resp := httptest.NewRecorder()
		respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
		require.NoError(err)
		assert.Equal(200, resp.Code)

		obj := respRaw.(*connectAuthorizeResp)
		assert.True(obj.Authorized)
		assert.Contains(obj.Reason, "Matched")
	}

	// API should be denied
	{
		args := &structs.ConnectAuthorizeRequest{
			Target:        target,
			ClientCertURI: connect.TestSpiffeIDService(t, "api").URI().String(),
		}
		req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args))
		resp := httptest.NewRecorder()
		respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
		require.NoError(err)
		assert.Equal(200, resp.Code)

		obj := respRaw.(*connectAuthorizeResp)
		assert.False(obj.Authorized)
		assert.Contains(obj.Reason, "Matched")
	}
}

// Test that authorize fails without service:write for the target service.
func TestAgentConnectAuthorize_serviceWrite(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	a := NewTestAgent(t, t.Name(), TestACLConfig())
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Create an ACL
	var token string
	{
		args := map[string]interface{}{
			"Name":  "User Token",
			"Type":  "client",
			"Rules": `service "foo" { policy = "read" }`,
		}
		req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args))
		resp := httptest.NewRecorder()
		obj, err := a.srv.ACLCreate(resp, req)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		aclResp := obj.(aclCreateResponse)
		token = aclResp.ID
	}

	args := &structs.ConnectAuthorizeRequest{
		Target:        "foo",
		ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
	}
	req, _ := http.NewRequest("POST",
		"/v1/agent/connect/authorize?token="+token, jsonReader(args))
	resp := httptest.NewRecorder()
	_, err := a.srv.AgentConnectAuthorize(resp, req)
	assert.True(acl.IsErrPermissionDenied(err))
}

// Test when no intentions match w/ a default deny policy
func TestAgentConnectAuthorize_defaultDeny(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	a := NewTestAgent(t, t.Name(), TestACLConfig())
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	args := &structs.ConnectAuthorizeRequest{
		Target:        "foo",
		ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
	}
	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize?token=root", jsonReader(args))
	resp := httptest.NewRecorder()
	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
	assert.NoError(err)
	assert.Equal(200, resp.Code)

	obj := respRaw.(*connectAuthorizeResp)
	assert.False(obj.Authorized)
	assert.Contains(obj.Reason, "Default behavior")
}

// Test when no intentions match w/ a default allow policy
func TestAgentConnectAuthorize_defaultAllow(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	dc1 := "dc1"
	a := NewTestAgent(t, t.Name(), `
		acl_datacenter = "`+dc1+`"
		acl_default_policy = "allow"
		acl_master_token = "root"
		acl_agent_token = "root"
		acl_agent_master_token = "towel"
		acl_enforce_version_8 = true
	`)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, dc1)

	args := &structs.ConnectAuthorizeRequest{
		Target:        "foo",
		ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(),
	}
	req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize?token=root", jsonReader(args))
	resp := httptest.NewRecorder()
	respRaw, err := a.srv.AgentConnectAuthorize(resp, req)
	assert.NoError(err)
	assert.Equal(200, resp.Code)
	assert.NotNil(respRaw)

	obj := respRaw.(*connectAuthorizeResp)
	assert.True(obj.Authorized)
	assert.Contains(obj.Reason, "Default behavior")
}

// testAllowProxyConfig returns agent config to allow managed proxy API
// registration.
6140 func testAllowProxyConfig() string { 6141 return ` 6142 connect { 6143 enabled = true 6144 6145 proxy { 6146 allow_managed_api_registration = true 6147 } 6148 } 6149 ` 6150 } 6151 6152 func TestAgent_Host(t *testing.T) { 6153 t.Parallel() 6154 assert := assert.New(t) 6155 6156 dc1 := "dc1" 6157 a := NewTestAgent(t, t.Name(), ` 6158 acl_datacenter = "`+dc1+`" 6159 acl_default_policy = "allow" 6160 acl_master_token = "master" 6161 acl_agent_token = "agent" 6162 acl_agent_master_token = "towel" 6163 acl_enforce_version_8 = true 6164 `) 6165 defer a.Shutdown() 6166 6167 testrpc.WaitForLeader(t, a.RPC, "dc1") 6168 req, _ := http.NewRequest("GET", "/v1/agent/host?token=master", nil) 6169 resp := httptest.NewRecorder() 6170 respRaw, err := a.srv.AgentHost(resp, req) 6171 assert.Nil(err) 6172 assert.Equal(http.StatusOK, resp.Code) 6173 assert.NotNil(respRaw) 6174 6175 obj := respRaw.(*debug.HostInfo) 6176 assert.NotNil(obj.CollectionTime) 6177 assert.Empty(obj.Errors) 6178 } 6179 6180 func TestAgent_HostBadACL(t *testing.T) { 6181 t.Parallel() 6182 assert := assert.New(t) 6183 6184 dc1 := "dc1" 6185 a := NewTestAgent(t, t.Name(), ` 6186 acl_datacenter = "`+dc1+`" 6187 acl_default_policy = "deny" 6188 acl_master_token = "root" 6189 acl_agent_token = "agent" 6190 acl_agent_master_token = "towel" 6191 acl_enforce_version_8 = true 6192 `) 6193 defer a.Shutdown() 6194 6195 testrpc.WaitForLeader(t, a.RPC, "dc1") 6196 req, _ := http.NewRequest("GET", "/v1/agent/host?token=agent", nil) 6197 resp := httptest.NewRecorder() 6198 respRaw, err := a.srv.AgentHost(resp, req) 6199 assert.EqualError(err, "ACL not found") 6200 assert.Equal(http.StatusOK, resp.Code) 6201 assert.Nil(respRaw) 6202 }