github.com/Iqoqo/consul@v1.4.5/agent/local/state_test.go

package local_test

import (
	"errors"
	"fmt"
	"log"
	"os"
	"reflect"
	"testing"
	"time"

	"github.com/hashicorp/consul/testrpc"

	"github.com/hashicorp/go-memdb"

	"github.com/hashicorp/consul/agent"
	"github.com/hashicorp/consul/agent/config"
	"github.com/hashicorp/consul/agent/local"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/agent/token"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/testutil/retry"
	"github.com/hashicorp/consul/types"
	"github.com/pascaldekloe/goe/verify"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestAgentAntiEntropy_Services(t *testing.T) {
	t.Parallel()
	a := &agent.TestAgent{Name: t.Name()}
	a.Start(t)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register info
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}

	// Exists both, same (noop)
	var out struct{}
	srv1 := &structs.NodeService{
		ID:      "mysql",
		Service: "mysql",
		Tags:    []string{"master"},
		Port:    5000,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
	}
	a.State.AddService(srv1, "")
	args.Service = srv1
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists both, different (update)
	srv2 := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{},
		Port:    8000,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 0,
		},
	}
	a.State.AddService(srv2, "")

	srv2_mod := new(structs.NodeService)
	*srv2_mod = *srv2
	srv2_mod.Port = 9000
	args.Service = srv2_mod
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists local (create)
	srv3 := &structs.NodeService{
		ID:      "web",
		Service: "web",
		Tags:    []string{},
		Port:    80,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
	}
	a.State.AddService(srv3, "")

	// Exists remote (delete)
	srv4 := &structs.NodeService{
		ID:      "lb",
		Service: "lb",
		Tags:    []string{},
		Port:    443,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 0,
		},
	}
	args.Service = srv4
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists both, different address (update)
	srv5 := &structs.NodeService{
		ID:      "api",
		Service: "api",
		Tags:    []string{},
		Address: "127.0.0.10",
		Port:    8000,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
	}
	a.State.AddService(srv5, "")

	srv5_mod := new(structs.NodeService)
	*srv5_mod = *srv5
	srv5_mod.Address = "127.0.0.1"
	args.Service = srv5_mod
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists local, in sync, remote missing (create)
	srv6 := &structs.NodeService{
		ID:      "cache",
		Service: "cache",
		Tags:    []string{},
		Port:    11211,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 0,
		},
	}
	a.State.SetServiceState(&local.ServiceState{
		Service: srv6,
		InSync:  true,
	})

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	var services structs.IndexedNodeServices
	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}

	if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Make sure we sent along our node info when we synced.
	id := services.NodeServices.Node.ID
	addrs := services.NodeServices.Node.TaggedAddresses
	meta := services.NodeServices.Node.Meta
	delete(meta, structs.MetaSegmentKey) // Added later, not in config.
	verify.Values(t, "node id", id, a.Config.NodeID)
	verify.Values(t, "tagged addrs", addrs, a.Config.TaggedAddresses)
	verify.Values(t, "node meta", meta, a.Config.NodeMeta)

	// We should have 6 services (consul included)
	if len(services.NodeServices.Services) != 6 {
		t.Fatalf("bad: %v", services.NodeServices.Services)
	}

	// All the services should match
	for id, serv := range services.NodeServices.Services {
		serv.CreateIndex, serv.ModifyIndex = 0, 0
		switch id {
		case "mysql":
			if !reflect.DeepEqual(serv, srv1) {
				t.Fatalf("bad: %v %v", serv, srv1)
			}
		case "redis":
			if !reflect.DeepEqual(serv, srv2) {
				t.Fatalf("bad: %#v %#v", serv, srv2)
			}
		case "web":
			if !reflect.DeepEqual(serv, srv3) {
				t.Fatalf("bad: %v %v", serv, srv3)
			}
		case "api":
			if !reflect.DeepEqual(serv, srv5) {
				t.Fatalf("bad: %v %v", serv, srv5)
			}
		case "cache":
			if !reflect.DeepEqual(serv, srv6) {
				t.Fatalf("bad: %v %v", serv, srv6)
			}
		case structs.ConsulServiceID:
			// ignore
		default:
			t.Fatalf("unexpected service: %v", id)
		}
	}

	if err := servicesInSync(a.State, 5); err != nil {
		t.Fatal(err)
	}

	// Remove one of the services
	a.State.RemoveService("api")

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should have 5 services (consul included)
	if len(services.NodeServices.Services) != 5 {
		t.Fatalf("bad: %v", services.NodeServices.Services)
	}

	// All the services should match
	for id, serv := range services.NodeServices.Services {
		serv.CreateIndex, serv.ModifyIndex = 0, 0
		switch id {
		case "mysql":
			if !reflect.DeepEqual(serv, srv1) {
				t.Fatalf("bad: %v %v", serv, srv1)
			}
		case "redis":
			if !reflect.DeepEqual(serv, srv2) {
				t.Fatalf("bad: %#v %#v", serv, srv2)
			}
		case "web":
			if !reflect.DeepEqual(serv, srv3) {
				t.Fatalf("bad: %v %v", serv, srv3)
			}
		case "cache":
			if !reflect.DeepEqual(serv, srv6) {
				t.Fatalf("bad: %v %v", serv, srv6)
			}
		case structs.ConsulServiceID:
			// ignore
		default:
			t.Fatalf("unexpected service: %v", id)
		}
	}

	if err := servicesInSync(a.State, 4); err != nil {
		t.Fatal(err)
	}
}
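
// servicesInSync and checksInSync are assertion helpers defined further down
// this file, outside this excerpt. A minimal sketch of the shape such a
// helper can take (an illustrative assumption, not the exact definition),
// using the local.State accessors exercised by these tests:
//
//	func servicesInSync(state *local.State, wantServices int) error {
//		services := state.ServiceStates()
//		if got, want := len(services), wantServices; got != want {
//			return fmt.Errorf("got %d services want %d", got, want)
//		}
//		for id, s := range services {
//			if !s.InSync {
//				return fmt.Errorf("service %q should be in sync", id)
//			}
//		}
//		return nil
//	}
//
// checksInSync would be the analogous walk over state.CheckStates().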

func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	a := &agent.TestAgent{Name: t.Name()}
	a.Start(t)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register node info
	var out struct{}
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}

	// Exists both, same (noop)
	srv1 := &structs.NodeService{
		Kind:    structs.ServiceKindConnectProxy,
		ID:      "mysql-proxy",
		Service: "mysql-proxy",
		Port:    5000,
		Proxy:   structs.ConnectProxyConfig{DestinationServiceName: "db"},
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
	}
	a.State.AddService(srv1, "")
	args.Service = srv1
	assert.Nil(a.RPC("Catalog.Register", args, &out))

	// Exists both, different (update)
	srv2 := &structs.NodeService{
		ID:      "redis-proxy",
		Service: "redis-proxy",
		Port:    8000,
		Kind:    structs.ServiceKindConnectProxy,
		Proxy:   structs.ConnectProxyConfig{DestinationServiceName: "redis"},
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 0,
		},
	}
	a.State.AddService(srv2, "")

	srv2_mod := new(structs.NodeService)
	*srv2_mod = *srv2
	srv2_mod.Port = 9000
	args.Service = srv2_mod
	assert.Nil(a.RPC("Catalog.Register", args, &out))

	// Exists local (create)
	srv3 := &structs.NodeService{
		ID:      "web-proxy",
		Service: "web-proxy",
		Port:    80,
		Kind:    structs.ServiceKindConnectProxy,
		Proxy:   structs.ConnectProxyConfig{DestinationServiceName: "web"},
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
	}
	a.State.AddService(srv3, "")

	// Exists remote (delete)
	srv4 := &structs.NodeService{
		ID:      "lb-proxy",
		Service: "lb-proxy",
		Port:    443,
		Kind:    structs.ServiceKindConnectProxy,
		Proxy:   structs.ConnectProxyConfig{DestinationServiceName: "lb"},
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 0,
		},
	}
	args.Service = srv4
	assert.Nil(a.RPC("Catalog.Register", args, &out))

	// Exists local, in sync, remote missing (create)
	srv5 := &structs.NodeService{
		ID:      "cache-proxy",
		Service: "cache-proxy",
		Port:    11211,
		Kind:    structs.ServiceKindConnectProxy,
		Proxy:   structs.ConnectProxyConfig{DestinationServiceName: "cache-proxy"},
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
	}
	a.State.SetServiceState(&local.ServiceState{
		Service: srv5,
		InSync:  true,
	})

	assert.Nil(a.State.SyncFull())

	var services structs.IndexedNodeServices
	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	assert.Nil(a.RPC("Catalog.NodeServices", &req, &services))

	// We should have 5 services (consul included)
	assert.Len(services.NodeServices.Services, 5)

	// All the services should match
	for id, serv := range services.NodeServices.Services {
		serv.CreateIndex, serv.ModifyIndex = 0, 0
		switch id {
		case "mysql-proxy":
			assert.Equal(srv1, serv)
		case "redis-proxy":
			assert.Equal(srv2, serv)
		case "web-proxy":
			assert.Equal(srv3, serv)
		case "cache-proxy":
			assert.Equal(srv5, serv)
		case structs.ConsulServiceID:
			// ignore
		default:
			t.Fatalf("unexpected service: %v", id)
		}
	}

	assert.Nil(servicesInSync(a.State, 4))

	// Remove one of the services
	a.State.RemoveService("cache-proxy")
	assert.Nil(a.State.SyncFull())
	assert.Nil(a.RPC("Catalog.NodeServices", &req, &services))

	// We should have 4 services (consul included)
	assert.Len(services.NodeServices.Services, 4)

	// All the services should match
	for id, serv := range services.NodeServices.Services {
		serv.CreateIndex, serv.ModifyIndex = 0, 0
		switch id {
		case "mysql-proxy":
			assert.Equal(srv1, serv)
		case "redis-proxy":
			assert.Equal(srv2, serv)
		case "web-proxy":
			assert.Equal(srv3, serv)
		case structs.ConsulServiceID:
			// ignore
		default:
			t.Fatalf("unexpected service: %v", id)
		}
	}

	assert.Nil(servicesInSync(a.State, 3))
}

func TestAgent_ServiceWatchCh(t *testing.T) {
	t.Parallel()
	a := &agent.TestAgent{Name: t.Name()}
	a.Start(t)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	require := require.New(t)

	// register a local service
	srv1 := &structs.NodeService{
		ID:      "svc_id1",
		Service: "svc1",
		Tags:    []string{"tag1"},
		Port:    6100,
	}
	require.NoError(a.State.AddService(srv1, ""))

	verifyState := func(ss *local.ServiceState) {
		require.NotNil(ss)
		require.NotNil(ss.WatchCh)

		// Sanity check WatchCh blocks
		select {
		case <-ss.WatchCh:
			t.Fatal("should block until service changes")
		default:
		}
	}

	// Should be able to get a ServiceState
	ss := a.State.ServiceState(srv1.ID)
	verifyState(ss)

	// Update the service in another goroutine
	go func() {
		srv2 := *srv1 // copy the struct so srv1 itself is not mutated
		srv2.Port = 6200
		require.NoError(a.State.AddService(&srv2, ""))
	}()

	// We should observe WatchCh close
	select {
	case <-ss.WatchCh:
		// OK!
	case <-time.After(500 * time.Millisecond):
		t.Fatal("timeout waiting for WatchCh to close")
	}

	// Should also fire for state being set explicitly
	ss = a.State.ServiceState(srv1.ID)
	verifyState(ss)

	go func() {
		a.State.SetServiceState(&local.ServiceState{
			Service: ss.Service,
			Token:   "foo",
		})
	}()

	// We should observe WatchCh close
	select {
	case <-ss.WatchCh:
		// OK!
	case <-time.After(500 * time.Millisecond):
		t.Fatal("timeout waiting for WatchCh to close")
	}

	// Should also fire for service being removed
	ss = a.State.ServiceState(srv1.ID)
	verifyState(ss)

	go func() {
		require.NoError(a.State.RemoveService(srv1.ID))
	}()

	// We should observe WatchCh close
	select {
	case <-ss.WatchCh:
		// OK!
	case <-time.After(500 * time.Millisecond):
		t.Fatal("timeout waiting for WatchCh to close")
	}
}
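
// WatchCh is closed (never sent on) to signal a change, so each notification
// consumes the channel: a long-lived watcher must re-fetch the ServiceState
// to obtain a fresh channel after every wakeup. A hypothetical consumer
// loop, shown only to illustrate the pattern the test above exercises:
//
//	for {
//		ss := a.State.ServiceState("svc_id1")
//		if ss == nil {
//			return // service has been removed
//		}
//		<-ss.WatchCh // blocks until the next change
//		// re-read ss.Service here and react to the update
//	}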

func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) {
	t.Parallel()
	a := &agent.TestAgent{Name: t.Name()}
	a.Start(t)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}
	var out struct{}

	// register a local service with tag override enabled
	srv1 := &structs.NodeService{
		ID:                "svc_id1",
		Service:           "svc1",
		Tags:              []string{"tag1"},
		Port:              6100,
		EnableTagOverride: true,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
	}
	a.State.AddService(srv1, "")

	// register a local service with tag override disabled
	srv2 := &structs.NodeService{
		ID:                "svc_id2",
		Service:           "svc2",
		Tags:              []string{"tag2"},
		Port:              6200,
		EnableTagOverride: false,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
	}
	a.State.AddService(srv2, "")

	// make sure they are both in the catalog
	if err := a.State.SyncChanges(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// update the services in the catalog and change the tags and port.
	// Only tag changes should be propagated for services where tag
	// override is enabled.
	args.Service = &structs.NodeService{
		ID:                srv1.ID,
		Service:           srv1.Service,
		Tags:              []string{"tag1_mod"},
		Port:              7100,
		EnableTagOverride: true,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
	}
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	args.Service = &structs.NodeService{
		ID:                srv2.ID,
		Service:           srv2.Service,
		Tags:              []string{"tag2_mod"},
		Port:              7200,
		EnableTagOverride: false,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 0,
		},
	}
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// sync catalog and local state
	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	var services structs.IndexedNodeServices

	if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	// All the services should match
	for id, serv := range services.NodeServices.Services {
		serv.CreateIndex, serv.ModifyIndex = 0, 0
		switch id {
		case "svc_id1":
			// tags should be modified but not the port
			got := serv
			want := &structs.NodeService{
				ID:                "svc_id1",
				Service:           "svc1",
				Tags:              []string{"tag1_mod"},
				Port:              6100,
				EnableTagOverride: true,
				Weights: &structs.Weights{
					Passing: 1,
					Warning: 1,
				},
			}
			if !verify.Values(t, "", got, want) {
				t.FailNow()
			}
		case "svc_id2":
			got, want := serv, srv2
			if !verify.Values(t, "", got, want) {
				t.FailNow()
			}
		case structs.ConsulServiceID:
			// ignore
		default:
			t.Fatalf("unexpected service: %v", id)
		}
	}

	if err := servicesInSync(a.State, 2); err != nil {
		t.Fatal(err)
	}
}

func TestAgentAntiEntropy_Services_WithChecks(t *testing.T) {
	t.Parallel()
	a := agent.NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	{
		// Single check
		srv := &structs.NodeService{
			ID:      "mysql",
			Service: "mysql",
			Tags:    []string{"master"},
			Port:    5000,
		}
		a.State.AddService(srv, "")

		chk := &structs.HealthCheck{
			Node:      a.Config.NodeName,
			CheckID:   "mysql",
			Name:      "mysql",
			ServiceID: "mysql",
			Status:    api.HealthPassing,
		}
		a.State.AddCheck(chk, "")

		if err := a.State.SyncFull(); err != nil {
			t.Fatal("sync failed: ", err)
		}

		// We should have 2 services (consul included)
		svcReq := structs.NodeSpecificRequest{
			Datacenter: "dc1",
			Node:       a.Config.NodeName,
		}
		var services structs.IndexedNodeServices
		if err := a.RPC("Catalog.NodeServices", &svcReq, &services); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(services.NodeServices.Services) != 2 {
			t.Fatalf("bad: %v", services.NodeServices.Services)
		}

		// We should have one health check
		chkReq := structs.ServiceSpecificRequest{
			Datacenter:  "dc1",
			ServiceName: "mysql",
		}
		var checks structs.IndexedHealthChecks
		if err := a.RPC("Health.ServiceChecks", &chkReq, &checks); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(checks.HealthChecks) != 1 {
			t.Fatalf("bad: %v", checks)
		}
	}

	{
		// Multiple checks
		srv := &structs.NodeService{
			ID:      "redis",
			Service: "redis",
			Tags:    []string{"master"},
			Port:    5000,
		}
		a.State.AddService(srv, "")

		chk1 := &structs.HealthCheck{
			Node:      a.Config.NodeName,
			CheckID:   "redis:1",
			Name:      "redis:1",
			ServiceID: "redis",
			Status:    api.HealthPassing,
		}
		a.State.AddCheck(chk1, "")

		chk2 := &structs.HealthCheck{
			Node:      a.Config.NodeName,
			CheckID:   "redis:2",
			Name:      "redis:2",
			ServiceID: "redis",
			Status:    api.HealthPassing,
		}
		a.State.AddCheck(chk2, "")

		if err := a.State.SyncFull(); err != nil {
			t.Fatal("sync failed: ", err)
		}

		// We should have 3 services (consul included)
		svcReq := structs.NodeSpecificRequest{
			Datacenter: "dc1",
			Node:       a.Config.NodeName,
		}
		var services structs.IndexedNodeServices
		if err := a.RPC("Catalog.NodeServices", &svcReq, &services); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(services.NodeServices.Services) != 3 {
			t.Fatalf("bad: %v", services.NodeServices.Services)
		}

		// We should have two health checks
		chkReq := structs.ServiceSpecificRequest{
			Datacenter:  "dc1",
			ServiceName: "redis",
		}
		var checks structs.IndexedHealthChecks
		if err := a.RPC("Health.ServiceChecks", &chkReq, &checks); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(checks.HealthChecks) != 2 {
			t.Fatalf("bad: %v", checks)
		}
	}
}

var testRegisterRules = `
 node "" {
 	policy = "write"
 }

 service "api" {
 	policy = "write"
 }

 service "consul" {
 	policy = "write"
 }
 `
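
// Note: the policy above grants write on every node and on the "api" and
// "consul" services only; registrations for any other service (e.g.
// "mysql") are denied, which is what the ACL anti-entropy tests below
// depend on.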

func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) {
	t.Parallel()
	a := &agent.TestAgent{Name: t.Name(), HCL: `
		acl_datacenter = "dc1"
		acl_master_token = "root"
		acl_default_policy = "deny"
		acl_enforce_version_8 = true`}
	a.Start(t)
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Create the ACL
	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name:  "User token",
			Type:  structs.ACLTokenTypeClient,
			Rules: testRegisterRules,
		},
		WriteRequest: structs.WriteRequest{
			Token: "root",
		},
	}
	var token string
	if err := a.RPC("ACL.Apply", &arg, &token); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create service (disallowed)
	srv1 := &structs.NodeService{
		ID:      "mysql",
		Service: "mysql",
		Tags:    []string{"master"},
		Port:    5000,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
	}
	a.State.AddService(srv1, token)

	// Create service (allowed)
	srv2 := &structs.NodeService{
		ID:      "api",
		Service: "api",
		Tags:    []string{"foo"},
		Port:    5001,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 0,
		},
	}
	a.State.AddService(srv2, token)

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	{
		req := structs.NodeSpecificRequest{
			Datacenter: "dc1",
			Node:       a.Config.NodeName,
			QueryOptions: structs.QueryOptions{
				Token: "root",
			},
		}
		var services structs.IndexedNodeServices
		if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
			t.Fatalf("err: %v", err)
		}

		// We should have 2 services (consul included)
		if len(services.NodeServices.Services) != 2 {
			t.Fatalf("bad: %v", services.NodeServices.Services)
		}

		// All the services should match
		for id, serv := range services.NodeServices.Services {
			serv.CreateIndex, serv.ModifyIndex = 0, 0
			switch id {
			case "mysql":
				t.Fatalf("should not be permitted")
			case "api":
				if !reflect.DeepEqual(serv, srv2) {
					t.Fatalf("bad: %#v %#v", serv, srv2)
				}
			case structs.ConsulServiceID:
				// ignore
			default:
				t.Fatalf("unexpected service: %v", id)
			}
		}

		if err := servicesInSync(a.State, 2); err != nil {
			t.Fatal(err)
		}
	}

	// Now remove the service and re-sync
	a.State.RemoveService("api")
	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	{
		req := structs.NodeSpecificRequest{
			Datacenter: "dc1",
			Node:       a.Config.NodeName,
			QueryOptions: structs.QueryOptions{
				Token: "root",
			},
		}
		var services structs.IndexedNodeServices
		if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
			t.Fatalf("err: %v", err)
		}

		// We should have 1 service (just consul)
		if len(services.NodeServices.Services) != 1 {
			t.Fatalf("bad: %v", services.NodeServices.Services)
		}

		// All the services should match
		for id, serv := range services.NodeServices.Services {
			serv.CreateIndex, serv.ModifyIndex = 0, 0
			switch id {
			case "mysql":
				t.Fatalf("should not be permitted")
			case "api":
				t.Fatalf("should be deleted")
			case structs.ConsulServiceID:
				// ignore
			default:
				t.Fatalf("unexpected service: %v", id)
			}
		}

		if err := servicesInSync(a.State, 1); err != nil {
			t.Fatal(err)
		}
	}

	// Make sure the token got cleaned up.
	if token := a.State.ServiceToken("api"); token != "" {
		t.Fatalf("bad: %s", token)
	}
}

func TestAgentAntiEntropy_Checks(t *testing.T) {
	t.Parallel()
	a := &agent.TestAgent{Name: t.Name()}
	a.Start(t)
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
	// Register info
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}

	// Exists both, same (noop)
	var out struct{}
	chk1 := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "mysql",
		Name:    "mysql",
		Status:  api.HealthPassing,
	}
	a.State.AddCheck(chk1, "")
	args.Check = chk1
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists both, different (update)
	chk2 := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "redis",
		Name:    "redis",
		Status:  api.HealthPassing,
	}
	a.State.AddCheck(chk2, "")

	chk2_mod := new(structs.HealthCheck)
	*chk2_mod = *chk2
	chk2_mod.Status = api.HealthCritical
	args.Check = chk2_mod
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists local (create)
	chk3 := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "web",
		Name:    "web",
		Status:  api.HealthPassing,
	}
	a.State.AddCheck(chk3, "")

	// Exists remote (delete)
	chk4 := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "lb",
		Name:    "lb",
		Status:  api.HealthPassing,
	}
	args.Check = chk4
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists local, in sync, remote missing (create)
	chk5 := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "cache",
		Name:    "cache",
		Status:  api.HealthPassing,
	}
	a.State.SetCheckState(&local.CheckState{
		Check:  chk5,
		InSync: true,
	})

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	var checks structs.IndexedHealthChecks

	// Verify that we are in sync
	if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should have 5 checks (serf included)
	if len(checks.HealthChecks) != 5 {
		t.Fatalf("bad: %v", checks)
	}

	// All the checks should match
	for _, chk := range checks.HealthChecks {
		chk.CreateIndex, chk.ModifyIndex = 0, 0
		switch chk.CheckID {
		case "mysql":
			if !reflect.DeepEqual(chk, chk1) {
				t.Fatalf("bad: %v %v", chk, chk1)
			}
		case "redis":
			if !reflect.DeepEqual(chk, chk2) {
				t.Fatalf("bad: %v %v", chk, chk2)
			}
		case "web":
			if !reflect.DeepEqual(chk, chk3) {
				t.Fatalf("bad: %v %v", chk, chk3)
			}
		case "cache":
			if !reflect.DeepEqual(chk, chk5) {
				t.Fatalf("bad: %v %v", chk, chk5)
			}
		case "serfHealth":
			// ignore
		default:
			t.Fatalf("unexpected check: %v", chk)
		}
	}

	if err := checksInSync(a.State, 4); err != nil {
		t.Fatal(err)
	}

	// Make sure we sent along our node info when we synced.
	{
		req := structs.NodeSpecificRequest{
			Datacenter: "dc1",
			Node:       a.Config.NodeName,
		}
		var services structs.IndexedNodeServices
		if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
			t.Fatalf("err: %v", err)
		}

		id := services.NodeServices.Node.ID
		addrs := services.NodeServices.Node.TaggedAddresses
		meta := services.NodeServices.Node.Meta
		delete(meta, structs.MetaSegmentKey) // Added later, not in config.
		verify.Values(t, "node id", id, a.Config.NodeID)
		verify.Values(t, "tagged addrs", addrs, a.Config.TaggedAddresses)
		verify.Values(t, "node meta", meta, a.Config.NodeMeta)
	}

	// Remove one of the checks
	a.State.RemoveCheck("redis")

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should have 4 checks (serf included)
	if len(checks.HealthChecks) != 4 {
		t.Fatalf("bad: %v", checks)
	}

	// All the checks should match
	for _, chk := range checks.HealthChecks {
		chk.CreateIndex, chk.ModifyIndex = 0, 0
		switch chk.CheckID {
		case "mysql":
			if !reflect.DeepEqual(chk, chk1) {
				t.Fatalf("bad: %v %v", chk, chk1)
			}
		case "web":
			if !reflect.DeepEqual(chk, chk3) {
				t.Fatalf("bad: %v %v", chk, chk3)
			}
		case "cache":
			if !reflect.DeepEqual(chk, chk5) {
				t.Fatalf("bad: %v %v", chk, chk5)
			}
		case "serfHealth":
			// ignore
		default:
			t.Fatalf("unexpected check: %v", chk)
		}
	}

	if err := checksInSync(a.State, 3); err != nil {
		t.Fatal(err)
	}
}

func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
	t.Parallel()
	dc := "dc1"
	a := &agent.TestAgent{Name: t.Name(), HCL: `
		acl_datacenter = "` + dc + `"
		acl_master_token = "root"
		acl_default_policy = "deny"
		acl_enforce_version_8 = true`}
	a.Start(t)
	defer a.Shutdown()

	testrpc.WaitForLeader(t, a.RPC, dc)

	// Create the ACL
	arg := structs.ACLRequest{
		Datacenter: dc,
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name:  "User token",
			Type:  structs.ACLTokenTypeClient,
			Rules: testRegisterRules,
		},
		WriteRequest: structs.WriteRequest{
			Token: "root",
		},
	}
	var token string
	if err := a.RPC("ACL.Apply", &arg, &token); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create services using the root token
	srv1 := &structs.NodeService{
		ID:      "mysql",
		Service: "mysql",
		Tags:    []string{"master"},
		Port:    5000,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
	}
	a.State.AddService(srv1, "root")
	srv2 := &structs.NodeService{
		ID:      "api",
		Service: "api",
		Tags:    []string{"foo"},
		Port:    5001,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
	}
	a.State.AddService(srv2, "root")

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	{
		req := structs.NodeSpecificRequest{
			Datacenter: dc,
			Node:       a.Config.NodeName,
			QueryOptions: structs.QueryOptions{
				Token: "root",
			},
		}
		var services structs.IndexedNodeServices
		if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
			t.Fatalf("err: %v", err)
		}

		// We should have 3 services (consul included)
		if len(services.NodeServices.Services) != 3 {
			t.Fatalf("bad: %v", services.NodeServices.Services)
		}

		// All the services should match
		for id, serv := range services.NodeServices.Services {
			serv.CreateIndex, serv.ModifyIndex = 0, 0
			switch id {
			case "mysql":
				if !reflect.DeepEqual(serv, srv1) {
					t.Fatalf("bad: %#v %#v", serv, srv1)
				}
			case "api":
				if !reflect.DeepEqual(serv, srv2) {
					t.Fatalf("bad: %#v %#v", serv, srv2)
				}
			case structs.ConsulServiceID:
				// ignore
			default:
				t.Fatalf("unexpected service: %v", id)
			}
		}

		if err := servicesInSync(a.State, 2); err != nil {
			t.Fatal(err)
		}
	}

	// This check won't be allowed.
	chk1 := &structs.HealthCheck{
		Node:        a.Config.NodeName,
		ServiceID:   "mysql",
		ServiceName: "mysql",
		ServiceTags: []string{"master"},
		CheckID:     "mysql-check",
		Name:        "mysql",
		Status:      api.HealthPassing,
	}
	a.State.AddCheck(chk1, token)

	// This one will be allowed.
	chk2 := &structs.HealthCheck{
		Node:        a.Config.NodeName,
		ServiceID:   "api",
		ServiceName: "api",
		ServiceTags: []string{"foo"},
		CheckID:     "api-check",
		Name:        "api",
		Status:      api.HealthPassing,
	}
	a.State.AddCheck(chk2, token)

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	req := structs.NodeSpecificRequest{
		Datacenter: dc,
		Node:       a.Config.NodeName,
		QueryOptions: structs.QueryOptions{
			Token: "root",
		},
	}
	var checks structs.IndexedHealthChecks
	if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should have 2 checks (serf included)
	if len(checks.HealthChecks) != 2 {
		t.Fatalf("bad: %v", checks)
	}

	// All the checks should match
	for _, chk := range checks.HealthChecks {
		chk.CreateIndex, chk.ModifyIndex = 0, 0
		switch chk.CheckID {
		case "mysql-check":
			t.Fatalf("should not be permitted")
		case "api-check":
			if !reflect.DeepEqual(chk, chk2) {
				t.Fatalf("bad: %v %v", chk, chk2)
			}
		case "serfHealth":
			// ignore
		default:
			t.Fatalf("unexpected check: %v", chk)
		}
	}

	if err := checksInSync(a.State, 2); err != nil {
		t.Fatal(err)
	}

	// Now delete the check and wait for sync.
	a.State.RemoveCheck("api-check")
	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	{
		req := structs.NodeSpecificRequest{
			Datacenter: dc,
			Node:       a.Config.NodeName,
			QueryOptions: structs.QueryOptions{
				Token: "root",
			},
		}
		var checks structs.IndexedHealthChecks
		if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
			t.Fatalf("err: %v", err)
		}

		// We should have 1 check (just serf)
		if len(checks.HealthChecks) != 1 {
			t.Fatalf("bad: %v", checks)
		}

		// All the checks should match
		for _, chk := range checks.HealthChecks {
			chk.CreateIndex, chk.ModifyIndex = 0, 0
			switch chk.CheckID {
			case "mysql-check":
				t.Fatalf("should not be permitted")
			case "api-check":
				t.Fatalf("should be deleted")
			case "serfHealth":
				// ignore
			default:
				t.Fatalf("unexpected check: %v", chk)
			}
		}
	}

	if err := checksInSync(a.State, 1); err != nil {
		t.Fatal(err)
	}

	// Make sure the token got cleaned up.
	if token := a.State.CheckToken("api-check"); token != "" {
		t.Fatalf("bad: %s", token)
	}
}

func TestAgent_UpdateCheck_DiscardOutput(t *testing.T) {
	t.Parallel()
	a := agent.NewTestAgent(t, t.Name(), `
		discard_check_output = true
		check_update_interval = "0s" # set to "0s" since otherwise output checks are deferred
	`)
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	inSync := func(id string) bool {
		s := a.State.CheckState(types.CheckID(id))
		if s == nil {
			return false
		}
		return s.InSync
	}

	// register a check
	check := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "web",
		Name:    "web",
		Status:  api.HealthPassing,
		Output:  "first output",
	}
	if err := a.State.AddCheck(check, ""); err != nil {
		t.Fatalf("bad: %s", err)
	}
	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("bad: %s", err)
	}
	if !inSync("web") {
		t.Fatal("check should be in sync")
	}

	// update the check with the same status but different output
	// and the check should still be in sync.
	a.State.UpdateCheck(check.CheckID, api.HealthPassing, "second output")
	if !inSync("web") {
		t.Fatal("check should be in sync")
	}

	// disable discarding of check output and update the check again with different
	// output. Then the check should be out of sync.
	a.State.SetDiscardCheckOutput(false)
	a.State.UpdateCheck(check.CheckID, api.HealthPassing, "third output")
	if inSync("web") {
		t.Fatal("check should be out of sync")
	}
}

func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
	t.Parallel()
	a := &agent.TestAgent{Name: t.Name(), HCL: `
		check_update_interval = "500ms"
	`}
	a.Start(t)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Create a check
	check := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "web",
		Name:    "web",
		Status:  api.HealthPassing,
		Output:  "",
	}
	a.State.AddCheck(check, "")

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	var checks structs.IndexedHealthChecks
	retry.Run(t, func(r *retry.R) {
		if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
			r.Fatalf("err: %v", err)
		}
		if got, want := len(checks.HealthChecks), 2; got != want {
			r.Fatalf("got %d health checks want %d", got, want)
		}
	})

	// Update the check output! Should be deferred
	a.State.UpdateCheck("web", api.HealthPassing, "output")

	// Should not update for 500 milliseconds
	time.Sleep(250 * time.Millisecond)
	if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify not updated
	for _, chk := range checks.HealthChecks {
		switch chk.CheckID {
		case "web":
			if chk.Output != "" {
				t.Fatalf("early update: %v", chk)
			}
		}
	}
	// Wait for a deferred update. TODO (slackpad) This isn't a great test
	// because we might be stuck in the random stagger from the full sync
	// after the leader election (~3 seconds) so it's easy to exceed the
	// default retry timeout here. Extending this makes the test a little
	// less flaky, but this isn't very clean for this first deferred update
	// since the full sync might pick it up, not the timer trigger. The
	// good news is that the later update below should be well past the full
	// sync so we are getting some coverage. We should rethink this a bit and
	// rework the deferred update stuff to be more testable.
	timer := &retry.Timer{Timeout: 6 * time.Second, Wait: 100 * time.Millisecond}
	retry.RunWith(timer, t, func(r *retry.R) {
		if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
			r.Fatal(err)
		}

		// Verify updated
		for _, chk := range checks.HealthChecks {
			switch chk.CheckID {
			case "web":
				if chk.Output != "output" {
					r.Fatalf("no update: %v", chk)
				}
			}
		}
	})

	// Change the output in the catalog to force it out of sync.
	eCopy := check.Clone()
	eCopy.Output = "changed"
	reg := structs.RegisterRequest{
		Datacenter:      a.Config.Datacenter,
		Node:            a.Config.NodeName,
		Address:         a.Config.AdvertiseAddrLAN.IP.String(),
		TaggedAddresses: a.Config.TaggedAddresses,
		Check:           eCopy,
		WriteRequest:    structs.WriteRequest{},
	}
	var out struct{}
	if err := a.RPC("Catalog.Register", &reg, &out); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Verify that the output is out of sync.
	if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}
	for _, chk := range checks.HealthChecks {
		switch chk.CheckID {
		case "web":
			if chk.Output != "changed" {
				t.Fatalf("unexpected update: %v", chk)
			}
		}
	}

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that the output was synced back to the agent's value.
	if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}
	for _, chk := range checks.HealthChecks {
		switch chk.CheckID {
		case "web":
			if chk.Output != "output" {
				t.Fatalf("missed update: %v", chk)
			}
		}
	}

	// Reset the catalog again.
	if err := a.RPC("Catalog.Register", &reg, &out); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Verify that the output is out of sync.
	if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}
	for _, chk := range checks.HealthChecks {
		switch chk.CheckID {
		case "web":
			if chk.Output != "changed" {
				t.Fatalf("unexpected update: %v", chk)
			}
		}
	}

	// Now make an update that should be deferred.
	a.State.UpdateCheck("web", api.HealthPassing, "deferred")

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that the output is still out of sync since there's a deferred
	// update pending.
	if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}
	for _, chk := range checks.HealthChecks {
		switch chk.CheckID {
		case "web":
			if chk.Output != "changed" {
				t.Fatalf("unexpected update: %v", chk)
			}
		}
	}
	// Wait for the deferred update.
	retry.Run(t, func(r *retry.R) {
		if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
			r.Fatal(err)
		}

		// Verify updated
		for _, chk := range checks.HealthChecks {
			switch chk.CheckID {
			case "web":
				if chk.Output != "deferred" {
					r.Fatalf("no update: %v", chk)
				}
			}
		}
	})
}

func TestAgentAntiEntropy_NodeInfo(t *testing.T) {
	t.Parallel()
	nodeID := types.NodeID("40e4a748-2192-161a-0510-9bf59fe950b5")
	nodeMeta := map[string]string{
		"somekey": "somevalue",
	}
	a := &agent.TestAgent{Name: t.Name(), HCL: `
		node_id = "40e4a748-2192-161a-0510-9bf59fe950b5"
		node_meta {
			somekey = "somevalue"
		}`}
	a.Start(t)
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Register info
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}
	var out struct{}
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	var services structs.IndexedNodeServices
	if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	id := services.NodeServices.Node.ID
	addrs := services.NodeServices.Node.TaggedAddresses
	meta := services.NodeServices.Node.Meta
	delete(meta, structs.MetaSegmentKey) // Added later, not in config.
	if id != a.Config.NodeID ||
		!reflect.DeepEqual(addrs, a.Config.TaggedAddresses) ||
		!reflect.DeepEqual(meta, a.Config.NodeMeta) {
		t.Fatalf("bad: %v", services.NodeServices.Node)
	}

	// Blow away the catalog version of the node info
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait for the sync - this should have been a sync of just the node info
	if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	{
		id := services.NodeServices.Node.ID
		addrs := services.NodeServices.Node.TaggedAddresses
		meta := services.NodeServices.Node.Meta
		delete(meta, structs.MetaSegmentKey) // Added later, not in config.
		if id != nodeID ||
			!reflect.DeepEqual(addrs, a.Config.TaggedAddresses) ||
			!reflect.DeepEqual(meta, nodeMeta) {
			t.Fatalf("bad: %v", services.NodeServices.Node)
		}
	}
}

func TestAgent_ServiceTokens(t *testing.T) {
	t.Parallel()

	tokens := new(token.Store)
	tokens.UpdateUserToken("default", token.TokenSourceConfig)
	cfg := config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`)
	l := local.NewState(agent.LocalConfig(cfg), nil, tokens)
	l.TriggerSyncChanges = func() {}

	l.AddService(&structs.NodeService{ID: "redis"}, "")

	// Returns default when no token is set
	if token := l.ServiceToken("redis"); token != "default" {
		t.Fatalf("bad: %s", token)
	}

	// Returns configured token
	l.AddService(&structs.NodeService{ID: "redis"}, "abc123")
	if token := l.ServiceToken("redis"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}

	// Keeps token around for the delete
	l.RemoveService("redis")
	if token := l.ServiceToken("redis"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}
}

func TestAgent_CheckTokens(t *testing.T) {
	t.Parallel()

	tokens := new(token.Store)
	tokens.UpdateUserToken("default", token.TokenSourceConfig)
	cfg := config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`)
	l := local.NewState(agent.LocalConfig(cfg), nil, tokens)
	l.TriggerSyncChanges = func() {}

	// Returns default when no token is set
	l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("mem")}, "")
	if token := l.CheckToken("mem"); token != "default" {
		t.Fatalf("bad: %s", token)
	}

	// Returns configured token
	l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("mem")}, "abc123")
	if token := l.CheckToken("mem"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}

	// Keeps token around for the delete
	l.RemoveCheck("mem")
	if token := l.CheckToken("mem"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}
}
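
// Both token tests above exercise the same fallback: ServiceToken and
// CheckToken return the token supplied at registration when one was given,
// and fall back to the agent's user token ("default" here) otherwise. The
// registration token is retained after RemoveService/RemoveCheck so the
// pending catalog deregistration can still be authorized with it.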

func TestAgent_CheckCriticalTime(t *testing.T) {
	t.Parallel()
	cfg := config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`)
	l := local.NewState(agent.LocalConfig(cfg), nil, new(token.Store))
	l.TriggerSyncChanges = func() {}

	svc := &structs.NodeService{ID: "redis", Service: "redis", Port: 8000}
	l.AddService(svc, "")

	// Add a passing check and make sure it's not critical.
	checkID := types.CheckID("redis:1")
	chk := &structs.HealthCheck{
		Node:      "node",
		CheckID:   checkID,
		Name:      "redis:1",
		ServiceID: "redis",
		Status:    api.HealthPassing,
	}
	l.AddCheck(chk, "")
	if checks := l.CriticalCheckStates(); len(checks) > 0 {
		t.Fatalf("should not have any critical checks")
	}

	// Set it to warning and make sure that doesn't show up as critical.
	l.UpdateCheck(checkID, api.HealthWarning, "")
	if checks := l.CriticalCheckStates(); len(checks) > 0 {
		t.Fatalf("should not have any critical checks")
	}

	// Fail the check and make sure the time looks reasonable.
	l.UpdateCheck(checkID, api.HealthCritical, "")
	if c, ok := l.CriticalCheckStates()[checkID]; !ok {
		t.Fatalf("should have a critical check")
	} else if c.CriticalFor() > time.Millisecond {
		t.Fatalf("bad: %#v", c)
	}

	// Wait a while, then fail it again and make sure the time keeps track
	// of the initial failure, and doesn't reset here.
	time.Sleep(50 * time.Millisecond)
	l.UpdateCheck(chk.CheckID, api.HealthCritical, "")
	if c, ok := l.CriticalCheckStates()[checkID]; !ok {
		t.Fatalf("should have a critical check")
	} else if c.CriticalFor() < 25*time.Millisecond ||
		c.CriticalFor() > 75*time.Millisecond {
		t.Fatalf("bad: %#v", c)
	}

	// Set it passing again.
	l.UpdateCheck(checkID, api.HealthPassing, "")
	if checks := l.CriticalCheckStates(); len(checks) > 0 {
		t.Fatalf("should not have any critical checks")
	}

	// Fail the check and make sure the time looks like it started again
	// from the latest failure, not the original one.
	l.UpdateCheck(checkID, api.HealthCritical, "")
	if c, ok := l.CriticalCheckStates()[checkID]; !ok {
		t.Fatalf("should have a critical check")
	} else if c.CriticalFor() > time.Millisecond {
		t.Fatalf("bad: %#v", c)
	}
}
  1765  
  1766  func TestAgent_AddCheckFailure(t *testing.T) {
  1767  	t.Parallel()
  1768  	cfg := config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`)
  1769  	l := local.NewState(agent.LocalConfig(cfg), nil, new(token.Store))
  1770  	l.TriggerSyncChanges = func() {}
  1771  
  1772  	// Add a check for a service that does not exist and verify that it fails
  1773  	checkID := types.CheckID("redis:1")
  1774  	chk := &structs.HealthCheck{
  1775  		Node:      "node",
  1776  		CheckID:   checkID,
  1777  		Name:      "redis:1",
  1778  		ServiceID: "redis",
  1779  		Status:    api.HealthPassing,
  1780  	}
  1781  	wantErr := errors.New(`Check "redis:1" refers to non-existent service "redis"`)
  1782  	if got, want := l.AddCheck(chk, ""), wantErr; !reflect.DeepEqual(got, want) {
  1783  		t.Fatalf("got error %q want %q", got, want)
  1784  	}
  1785  }
  1786  
  1787  func TestAgent_AliasCheck(t *testing.T) {
  1788  	t.Parallel()
  1789  
  1790  	require := require.New(t)
  1791  	cfg := config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`)
  1792  	l := local.NewState(agent.LocalConfig(cfg), nil, new(token.Store))
  1793  	l.TriggerSyncChanges = func() {}
  1794  
  1795  	// Add services and checks
  1796  	require.NoError(l.AddService(&structs.NodeService{Service: "s1"}, ""))
  1797  	require.NoError(l.AddService(&structs.NodeService{Service: "s2"}, ""))
  1798  	require.NoError(l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("c1"), ServiceID: "s1"}, ""))
  1799  	require.NoError(l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("c2"), ServiceID: "s2"}, ""))
  1800  
  1801  	// Add an alias
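        	// The alias should notify only when the watched check's status
        	// actually changes; the cap-1 channel buffers at most one pending
        	// notification.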
  1802  	notifyCh := make(chan struct{}, 1)
  1803  	require.NoError(l.AddAliasCheck(types.CheckID("a1"), "s1", notifyCh))
  1804  
  1805  	// Update and verify we get notified
  1806  	l.UpdateCheck(types.CheckID("c1"), api.HealthCritical, "")
  1807  	select {
  1808  	case <-notifyCh:
  1809  	default:
  1810  		t.Fatal("notify not received")
  1811  	}
  1812  
  1813  	// Update again and verify we do not get notified
  1814  	l.UpdateCheck(types.CheckID("c1"), api.HealthCritical, "")
  1815  	select {
  1816  	case <-notifyCh:
  1817  		t.Fatal("notify received")
  1818  	default:
  1819  	}
  1820  
  1821  	// Update other check and verify we do not get notified
  1822  	l.UpdateCheck(types.CheckID("c2"), api.HealthCritical, "")
  1823  	select {
  1824  	case <-notifyCh:
  1825  		t.Fatal("notify received")
  1826  	default:
  1827  	}
  1828  
  1829  	// Update with a new status and verify we get notified
  1830  	l.UpdateCheck(types.CheckID("c1"), api.HealthPassing, "")
  1831  	select {
  1832  	case <-notifyCh:
  1833  	default:
  1834  		t.Fatal("notify not received")
  1835  	}
  1836  }
  1837  
  1838  func TestAgent_sendCoordinate(t *testing.T) {
  1839  	t.Parallel()
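        	// Aggressive coordinate sync settings so the agent publishes a
        	// coordinate quickly enough for the retry loop below to observe it.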
  1840  	a := agent.NewTestAgent(t, t.Name(), `
  1841  		sync_coordinate_interval_min = "1ms"
  1842  		sync_coordinate_rate_target = 10.0
  1843  		consul = {
  1844  			coordinate = {
  1845  				update_period = "100ms"
  1846  				update_batch_size = 10
  1847  				update_max_batches = 1
  1848  			}
  1849  		}
  1850  	`)
  1851  	defer a.Shutdown()
  1852  	testrpc.WaitForLeader(t, a.RPC, "dc1")
  1853  
  1854  	t.Logf("%d %d %s",
  1855  		a.Config.ConsulCoordinateUpdateBatchSize,
  1856  		a.Config.ConsulCoordinateUpdateMaxBatches,
  1857  		a.Config.ConsulCoordinateUpdatePeriod.String())
  1858  
  1859  	// Make sure the coordinate is present.
  1860  	req := structs.DCSpecificRequest{
  1861  		Datacenter: a.Config.Datacenter,
  1862  	}
  1863  	var reply structs.IndexedCoordinates
  1864  	retry.Run(t, func(r *retry.R) {
  1865  		if err := a.RPC("Coordinate.ListNodes", &req, &reply); err != nil {
  1866  			r.Fatalf("err: %s", err)
  1867  		}
  1868  		if len(reply.Coordinates) != 1 {
  1869  			r.Fatalf("expected a coordinate: %v", reply)
  1870  		}
  1871  		coord := reply.Coordinates[0]
  1872  		if coord.Node != a.Config.NodeName || coord.Coord == nil {
  1873  			r.Fatalf("bad: %v", coord)
  1874  		}
  1875  	})
  1876  }
  1877  
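         // servicesInSync checks that the local state holds exactly wantServices
         // services and that every one of them is marked in sync with the catalog.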
  1878  func servicesInSync(state *local.State, wantServices int) error {
  1879  	services := state.ServiceStates()
  1880  	if got, want := len(services), wantServices; got != want {
  1881  		return fmt.Errorf("got %d services want %d", got, want)
  1882  	}
  1883  	for id, s := range services {
  1884  		if !s.InSync {
  1885  			return fmt.Errorf("service %q should be in sync", id)
  1886  		}
  1887  	}
  1888  	return nil
  1889  }
  1890  
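         // checksInSync checks that the local state holds exactly wantChecks
         // checks and that every one of them is marked in sync with the catalog.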
  1891  func checksInSync(state *local.State, wantChecks int) error {
  1892  	checks := state.CheckStates()
  1893  	if got, want := len(checks), wantChecks; got != want {
  1894  		return fmt.Errorf("got %d checks want %d", got, want)
  1895  	}
  1896  	for id, c := range checks {
  1897  		if !c.InSync {
  1898  			return fmt.Errorf("check %q should be in sync", id)
  1899  		}
  1900  	}
  1901  	return nil
  1902  }
  1903  
  1904  func TestState_Notify(t *testing.T) {
  1905  	t.Parallel()
  1906  
  1907  	state := local.NewState(local.Config{},
  1908  		log.New(os.Stderr, "", log.LstdFlags), &token.Store{})
  1909  
  1910  	// Stub state syncing
  1911  	state.TriggerSyncChanges = func() {}
  1912  
  1913  	require := require.New(t)
  1914  	assert := assert.New(t)
  1915  
  1916  	// Register a notifier
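        	// cap 1 coalesces notifications, so checking the channel length
        	// below tells us whether a notification is pending.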
  1917  	notifyCh := make(chan struct{}, 1)
  1918  	state.Notify(notifyCh)
  1919  	defer state.StopNotify(notifyCh)
  1920  	assert.Empty(notifyCh)
  1921  	drainCh(notifyCh)
  1922  
  1923  	// Add a service
  1924  	err := state.AddService(&structs.NodeService{
  1925  		Service: "web",
  1926  	}, "fake-token-web")
  1927  	require.NoError(err)
  1928  
  1929  	// Should have a notification
  1930  	assert.NotEmpty(notifyCh)
  1931  	drainCh(notifyCh)
  1932  
  1933  	// Re-add the same service
  1934  	err = state.AddService(&structs.NodeService{
  1935  		Service: "web",
  1936  		Port:    4444,
  1937  	}, "fake-token-web")
  1938  	require.NoError(err)
  1939  
  1940  	// Should have a notification
  1941  	assert.NotEmpty(notifyCh)
  1942  	drainCh(notifyCh)
  1943  
  1944  	// Remove service
  1945  	require.NoError(state.RemoveService("web"))
  1946  
  1947  	// Should have a notification
  1948  	assert.NotEmpty(notifyCh)
  1949  	drainCh(notifyCh)
  1950  
  1951  	// Stopping should... stop
  1952  	state.StopNotify(notifyCh)
  1953  
  1954  	// Add a service
  1955  	err = state.AddService(&structs.NodeService{
  1956  		Service: "web",
  1957  	}, "fake-token-web")
  1958  	require.NoError(err)
  1959  
  1960  	// Should NOT have a notification
  1961  	assert.Empty(notifyCh)
  1962  	drainCh(notifyCh)
  1963  }
  1964  
  1965  func TestStateProxyManagement(t *testing.T) {
  1966  	t.Parallel()
  1967  
  1968  	state := local.NewState(local.Config{
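        		// Only two ports in the range, so the allocator is easy to
        		// exhaust and assert on below.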
  1969  		ProxyBindMinPort: 20000,
  1970  		ProxyBindMaxPort: 20001,
  1971  	}, log.New(os.Stderr, "", log.LstdFlags), &token.Store{})
  1972  
  1973  	// Stub state syncing
  1974  	state.TriggerSyncChanges = func() {}
  1975  
  1976  	p1 := structs.ConnectManagedProxy{
  1977  		ExecMode:        structs.ProxyExecModeDaemon,
  1978  		Command:         []string{"consul", "connect", "proxy"},
  1979  		TargetServiceID: "web",
  1980  	}
  1981  
  1982  	require := require.New(t)
  1983  	assert := assert.New(t)
  1984  
  1985  	_, err := state.AddProxy(&p1, "fake-token", "")
  1986  	require.Error(err, "should fail as the target service isn't registered")
  1987  
  1988  	// Sanity check done, let's add a few target services to the state
  1989  	err = state.AddService(&structs.NodeService{
  1990  		Service: "web",
  1991  	}, "fake-token-web")
  1992  	require.NoError(err)
  1993  	err = state.AddService(&structs.NodeService{
  1994  		Service: "cache",
  1995  	}, "fake-token-cache")
  1996  	require.NoError(err)
  1998  	err = state.AddService(&structs.NodeService{
  1999  		Service: "db",
  2000  	}, "fake-token-db")
  2001  	require.NoError(err)
  2002  
  2003  	// Should work now
  2004  	pstate, err := state.AddProxy(&p1, "fake-token", "")
  2005  	require.NoError(err)
  2006  
  2007  	svc := pstate.Proxy.ProxyService
  2008  	assert.Equal("web-proxy", svc.ID)
  2009  	assert.Equal("web-proxy", svc.Service)
  2010  	assert.Equal(structs.ServiceKindConnectProxy, svc.Kind)
  2011  	assert.Equal("web", svc.Proxy.DestinationServiceName)
  2012  	assert.Equal("", svc.Address, "should have empty address by default")
  2013  	// Port is non-deterministic, but must be one of 20000 or 20001
  2014  	assert.Contains([]int{20000, 20001}, svc.Port)
  2015  
  2016  	{
  2017  		// Re-registering the same proxy again should not pick a new random port
  2018  		// but re-use the assigned one. It should also keep the same proxy token,
  2019  		// since we don't want to force a restart on a config change.
  2020  		pstateDup, err := state.AddProxy(&p1, "fake-token", "")
  2021  		require.NoError(err)
  2022  		svcDup := pstateDup.Proxy.ProxyService
  2023  
  2024  		assert.Equal("web-proxy", svcDup.ID)
  2025  		assert.Equal("web-proxy", svcDup.Service)
  2026  		assert.Equal(structs.ServiceKindConnectProxy, svcDup.Kind)
  2027  		assert.Equal("web", svcDup.Proxy.DestinationServiceName)
  2028  		assert.Equal("", svcDup.Address, "should have empty address by default")
  2029  		// Port must be the same as before
  2030  		assert.Equal(svc.Port, svcDup.Port)
  2031  		// Same ProxyToken
  2032  		assert.Equal(pstate.ProxyToken, pstateDup.ProxyToken)
  2033  	}
  2034  
  2035  	// Let's register a notifier now
  2036  	notifyCh := make(chan struct{}, 1)
  2037  	state.NotifyProxy(notifyCh)
  2038  	defer state.StopNotifyProxy(notifyCh)
  2039  	assert.Empty(notifyCh)
  2040  	drainCh(notifyCh)
  2041  
  2042  	// Second proxy should claim the other port
  2043  	p2 := p1
  2044  	p2.TargetServiceID = "cache"
  2045  	pstate2, err := state.AddProxy(&p2, "fake-token", "")
  2046  	require.NoError(err)
  2047  	svc2 := pstate2.Proxy.ProxyService
  2048  	assert.Contains([]int{20000, 20001}, svc2.Port)
  2049  	assert.NotEqual(svc.Port, svc2.Port)
  2050  
  2051  	// Should have a notification
  2052  	assert.NotEmpty(notifyCh)
  2053  	drainCh(notifyCh)
  2054  
  2055  	// Store this for later
  2056  	p2token := state.Proxy(svc2.ID).ProxyToken
  2057  
  2058  	// Third proxy should fail as all ports are used
  2059  	p3 := p1
  2060  	p3.TargetServiceID = "db"
  2061  	_, err = state.AddProxy(&p3, "fake-token", "")
  2062  	require.Error(err)
  2063  
  2064  	// Should have a notification but we'll do nothing so that the next
  2065  	// receive should block (we set cap == 1 above)
  2066  
  2067  	// But if we set a port explicitly it should be OK
  2068  	p3.Config = map[string]interface{}{
  2069  		"bind_port":    1234,
  2070  		"bind_address": "0.0.0.0",
  2071  	}
  2072  	pstate3, err := state.AddProxy(&p3, "fake-token", "")
  2073  	require.NoError(err)
  2074  	svc3 := pstate3.Proxy.ProxyService
  2075  	require.Equal("0.0.0.0", svc3.Address)
  2076  	require.Equal(1234, svc3.Port)
  2077  
  2078  	// Should have a notification
  2079  	assert.NotEmpty(notifyCh)
  2080  	drainCh(notifyCh)
  2081  
  2082  	// Updating the config of an already registered proxy should work
  2083  	p3updated := p3
  2084  	p3updated.Config["foo"] = "bar"
  2085  	// Set up multiple watchers that should all witness the change
  2086  	gotP3 := state.Proxy(svc3.ID)
  2087  	require.NotNil(gotP3)
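        	// memdb.WatchSet aggregates watch channels; Watch returns false
        	// if any of them fires before the timeout channel does.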
  2088  	var ws memdb.WatchSet
  2089  	ws.Add(gotP3.WatchCh)
  2090  	pstate3, err = state.AddProxy(&p3updated, "fake-token", "")
  2091  	require.NoError(err)
  2092  	svc3 = pstate3.Proxy.ProxyService
  2093  	require.Equal("0.0.0.0", svc3.Address)
  2094  	require.Equal(1234, svc3.Port)
  2095  	gotProxy3 := state.Proxy(svc3.ID)
  2096  	require.NotNil(gotProxy3)
  2097  	require.Equal(p3updated.Config, gotProxy3.Proxy.Config)
  2098  	assert.False(ws.Watch(time.After(500*time.Millisecond)),
  2099  		"watch should have fired so ws.Watch should not timeout")
  2100  
  2101  	drainCh(notifyCh)
  2102  
  2103  	// Remove one of the auto-assigned proxies
  2104  	_, err = state.RemoveProxy(svc2.ID)
  2105  	require.NoError(err)
  2106  
  2107  	// Should have a notification
  2108  	assert.NotEmpty(notifyCh)
  2109  	drainCh(notifyCh)
  2110  
  2111  	// Should be able to create a new proxy for that service, reusing the
  2112  	// port that should have been "freed".
  2113  	p4 := p2
  2114  	pstate4, err := state.AddProxy(&p4, "fake-token", "")
  2115  	require.NoError(err)
  2116  	svc4 := pstate4.Proxy.ProxyService
  2117  	assert.Contains([]int{20000, 20001}, svc4.Port)
  2118  	assert.Equal(svc4.Port, svc2.Port, "should get the same port back that we freed")
  2119  
  2120  	// Removing a proxy that doesn't exist should error
  2121  	_, err = state.RemoveProxy("nope")
  2122  	require.Error(err)
  2123  
  2124  	assert.Equal(&p4, state.Proxy(p4.ProxyService.ID).Proxy,
  2125  		"should fetch the right proxy details")
  2126  	assert.Nil(state.Proxy("nope"))
  2127  
  2128  	proxies := state.Proxies()
  2129  	assert.Len(proxies, 3)
  2130  	assert.Equal(&p1, proxies[svc.ID].Proxy)
  2131  	assert.Equal(&p4, proxies[svc4.ID].Proxy)
  2132  	assert.Equal(&p3, proxies[svc3.ID].Proxy)
  2133  
  2134  	tokens := make([]string, 4)
  2135  	tokens[0] = state.Proxy(svc.ID).ProxyToken
  2136  	// p2 is not registered anymore, but let's make sure p4 got a new token
  2137  	// when it re-registered with the same ID.
  2138  	tokens[1] = p2token
  2139  	tokens[2] = state.Proxy(svc2.ID).ProxyToken
  2140  	tokens[3] = state.Proxy(svc3.ID).ProxyToken
  2141  
  2142  	// Quick check that all tokens are distinct
  2143  	for i := 0; i < len(tokens)-1; i++ {
  2144  		assert.Len(tokens[i], 36) // Sanity check that it looks like a UUID.
  2145  		for j := i + 1; j < len(tokens); j++ {
  2146  			assert.NotEqual(tokens[i], tokens[j], "tokens for proxy %d and %d match",
  2147  				i+1, j+1)
  2148  		}
  2149  	}
  2150  }
  2151  
  2152  // Tests the logic for retaining tokens and ports through restore (i.e.
  2153  // proxy-service already restored and token passed in externally)
  2154  func TestStateProxyRestore(t *testing.T) {
  2155  	t.Parallel()
  2156  
  2157  	state := local.NewState(local.Config{
  2158  		// Wide random range to make it very unlikely to pass by chance
  2159  		ProxyBindMinPort: 10000,
  2160  		ProxyBindMaxPort: 20000,
  2161  	}, log.New(os.Stderr, "", log.LstdFlags), &token.Store{})
  2162  
  2163  	// Stub state syncing
  2164  	state.TriggerSyncChanges = func() {}
  2165  
  2166  	webSvc := structs.NodeService{
  2167  		Service: "web",
  2168  	}
  2169  
  2170  	p1 := structs.ConnectManagedProxy{
  2171  		ExecMode:        structs.ProxyExecModeDaemon,
  2172  		Command:         []string{"consul", "connect", "proxy"},
  2173  		TargetServiceID: "web",
  2174  	}
  2175  
  2176  	p2 := p1
  2177  
  2178  	require := require.New(t)
  2179  	assert := assert.New(t)
  2180  
  2181  	// Add a target service
  2182  	require.NoError(state.AddService(&webSvc, "fake-token-web"))
  2183  
  2184  	// Add the proxy for first time to get the proper service definition to
  2185  	// register
  2186  	pstate, err := state.AddProxy(&p1, "fake-token", "")
  2187  	require.NoError(err)
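        	// pstate.ProxyToken now stands in for a token that was persisted
        	// before the agent restarted.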
  2188  
  2189  	// Now start again with a brand new state
  2190  	state2 := local.NewState(local.Config{
  2191  		// Wide random range to make it very unlikely to pass by chance
  2192  		ProxyBindMinPort: 10000,
  2193  		ProxyBindMaxPort: 20000,
  2194  	}, log.New(os.Stderr, "", log.LstdFlags), &token.Store{})
  2195  
  2196  	// Stub state syncing
  2197  	state2.TriggerSyncChanges = func() {}
  2198  
  2199  	// Register the target service
  2200  	require.NoError(state2.AddService(&webSvc, "fake-token-web"))
  2201  
  2202  	// "Restore" the proxy service
  2203  	require.NoError(state2.AddService(p1.ProxyService, "fake-token-web"))
  2204  
  2205  	// Now we can AddProxy with the "restored" token
  2206  	pstate2, err := state2.AddProxy(&p2, "fake-token", pstate.ProxyToken)
  2207  	require.NoError(err)
  2208  
  2209  	// Check it still has the same port and token as before
  2210  	assert.Equal(pstate.ProxyToken, pstate2.ProxyToken)
  2211  	assert.Equal(p1.ProxyService.Port, p2.ProxyService.Port)
  2212  }
  2213  
  2214  // drainCh drains a channel by reading messages until it would block.
  2215  func drainCh(ch chan struct{}) {
  2216  	for {
  2217  		select {
  2218  		case <-ch:
  2219  		default:
  2220  			return
  2221  		}
  2222  	}
  2223  }