github.imxd.top/hashicorp/consul@v1.4.5/agent/proxycfg/manager_test.go

package proxycfg

import (
	"log"
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/hashicorp/consul/agent/cache"
	cachetype "github.com/hashicorp/consul/agent/cache-types"
	"github.com/hashicorp/consul/agent/local"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/agent/token"
)

// assertLastReqArgs verifies that each request type had the correct source
// parameters (e.g. Datacenter name) and token.
func assertLastReqArgs(t *testing.T, types *TestCacheTypes, token string, source *structs.QuerySource) {
	t.Helper()
	// Roots needs correct DC and token
	rootReq := types.roots.lastReq.Load()
	require.IsType(t, rootReq, &structs.DCSpecificRequest{})
	require.Equal(t, token, rootReq.(*structs.DCSpecificRequest).Token)
	require.Equal(t, source.Datacenter, rootReq.(*structs.DCSpecificRequest).Datacenter)

	// Leaf needs correct DC and token
	leafReq := types.leaf.lastReq.Load()
	require.IsType(t, leafReq, &cachetype.ConnectCALeafRequest{})
	require.Equal(t, token, leafReq.(*cachetype.ConnectCALeafRequest).Token)
	require.Equal(t, source.Datacenter, leafReq.(*cachetype.ConnectCALeafRequest).Datacenter)

	// Intentions needs correct DC and token
	intReq := types.intentions.lastReq.Load()
	require.IsType(t, intReq, &structs.IntentionQueryRequest{})
	require.Equal(t, token, intReq.(*structs.IntentionQueryRequest).Token)
	require.Equal(t, source.Datacenter, intReq.(*structs.IntentionQueryRequest).Datacenter)
}
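
// For context, the requests asserted on above carry the ACL token via the
// embedded structs.QueryOptions and the agent's datacenter from the
// QuerySource. The hypothetical constructor below is a sketch of that
// correlation for the roots request only; it illustrates which fields the
// helper checks and is not how proxycfg actually builds its cache requests.
func exampleRootsReqSketch(token string, source *structs.QuerySource) *structs.DCSpecificRequest {
	return &structs.DCSpecificRequest{
		Datacenter:   source.Datacenter,
		QueryOptions: structs.QueryOptions{Token: token},
	}
}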

func TestManager_BasicLifecycle(t *testing.T) {
	// Use a mocked cache to make life simpler
	types := NewTestCacheTypes(t)
	c := TestCacheWithTypes(t, types)

	require := require.New(t)

	roots, leaf := TestCerts(t)

	// Setup initial values
	types.roots.value.Store(roots)
	types.leaf.value.Store(leaf)
	types.intentions.value.Store(TestIntentions(t))
	types.health.value.Store(
		&structs.IndexedCheckServiceNodes{
			Nodes: TestUpstreamNodes(t),
		})

	logger := log.New(os.Stderr, "", log.LstdFlags)
	state := local.NewState(local.Config{}, logger, &token.Store{})
	source := &structs.QuerySource{
		Node:       "node1",
		Datacenter: "dc1",
	}

	// Stub state syncing
	state.TriggerSyncChanges = func() {}

	// Create manager
	m, err := NewManager(ManagerConfig{c, state, source, logger})
	require.NoError(err)

	// And run it
	go func() {
		err := m.Run()
		require.NoError(err)
	}()

	// Register a proxy for "web"
	webProxy := &structs.NodeService{
		Kind:    structs.ServiceKindConnectProxy,
		ID:      "web-sidecar-proxy",
		Service: "web-sidecar-proxy",
		Port:    9999,
		Proxy: structs.ConnectProxyConfig{
			DestinationServiceID:   "web",
			DestinationServiceName: "web",
			LocalServiceAddress:    "127.0.0.1",
			LocalServicePort:       8080,
			Config: map[string]interface{}{
				"foo": "bar",
			},
			Upstreams: structs.TestUpstreams(t),
		},
	}

	// BEFORE we register, we should be able to get a watch channel
	wCh, cancel := m.Watch(webProxy.ID)
	defer cancel()

	// And it should block with nothing sent on it yet
	assertWatchChanBlocks(t, wCh)

	require.NoError(state.AddService(webProxy, "my-token"))

	// We should see the initial config delivered but not until after the
	// coalesce timeout
	expectSnap := &ConfigSnapshot{
		ProxyID: webProxy.ID,
		Address: webProxy.Address,
		Port:    webProxy.Port,
		Proxy:   webProxy.Proxy,
		Roots:   roots,
		Leaf:    leaf,
		UpstreamEndpoints: map[string]structs.CheckServiceNodes{
			"service:db": TestUpstreamNodes(t),
		},
	}
	start := time.Now()
	assertWatchChanRecvs(t, wCh, expectSnap)
	require.True(time.Since(start) >= coalesceTimeout)

	assertLastReqArgs(t, types, "my-token", source)

	// Update the proxy registration with a new port
	webProxy.Port = 7777
	require.NoError(state.AddService(webProxy, "my-token"))

	expectSnap.Port = 7777
	assertWatchChanRecvs(t, wCh, expectSnap)

	// Register a second watcher
	wCh2, cancel2 := m.Watch(webProxy.ID)
	defer cancel2()

	// New watcher should immediately receive the current state
	assertWatchChanRecvs(t, wCh2, expectSnap)

	// Change token
	require.NoError(state.AddService(webProxy, "other-token"))
	assertWatchChanRecvs(t, wCh, expectSnap)
	assertWatchChanRecvs(t, wCh2, expectSnap)

	// This is actually sort of timing dependent: the cache background fetcher
	// will still be fetching with the old token, but we rely on the fact that
	// our mock type will have been blocked on those fetches for a while.
	assertLastReqArgs(t, types, "other-token", source)

	// Update roots
	newRoots, newLeaf := TestCerts(t)
	newRoots.Roots = append(newRoots.Roots, roots.Roots...)
	types.roots.Set(newRoots)

	// Expect new roots in snapshot
	expectSnap.Roots = newRoots
	assertWatchChanRecvs(t, wCh, expectSnap)
	assertWatchChanRecvs(t, wCh2, expectSnap)

	// Update leaf
	types.leaf.Set(newLeaf)

	// Expect new leaf in snapshot
	expectSnap.Leaf = newLeaf
	assertWatchChanRecvs(t, wCh, expectSnap)
	assertWatchChanRecvs(t, wCh2, expectSnap)

	// Remove the proxy
	state.RemoveService(webProxy.ID)

	// Chan should NOT close
	assertWatchChanBlocks(t, wCh)
	assertWatchChanBlocks(t, wCh2)

	// Re-add the proxy with another new port
	webProxy.Port = 3333
	require.NoError(state.AddService(webProxy, "other-token"))

	// Same watch chan should be notified again
	expectSnap.Port = 3333
	assertWatchChanRecvs(t, wCh, expectSnap)
	assertWatchChanRecvs(t, wCh2, expectSnap)

	// Cancel watch
	cancel()

	// Watch chan should be closed
	assertWatchChanRecvs(t, wCh, nil)

	// We specifically don't remove the proxy or cancel the second watcher to
	// ensure both are cleaned up by close.
	require.NoError(m.Close())

	// Sanity check the state is clean
	m.mu.Lock()
	defer m.mu.Unlock()
	require.Len(m.proxies, 0)
	require.Len(m.watchers, 0)
}
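
// The lifecycle above is driven the same way a real consumer (such as the
// agent's xDS server) would drive it: obtain a channel from Watch, receive
// snapshots until the channel closes, and call the returned cancel function
// when done. The helper below is a hypothetical sketch of that consumption
// loop, written for illustration in this test file only; it is not part of
// Consul's proxycfg API.
func watchProxySketch(m *Manager, proxyID string, handle func(*ConfigSnapshot)) {
	wCh, cancel := m.Watch(proxyID)
	defer cancel()

	// Each receive is a complete snapshot of the proxy's config; the channel
	// is closed when the watch is cancelled or the Manager is closed.
	for snap := range wCh {
		handle(snap)
	}
}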

func assertWatchChanBlocks(t *testing.T, ch <-chan *ConfigSnapshot) {
	t.Helper()

	select {
	case <-ch:
		t.Fatal("Should be nothing sent on watch chan yet")
	default:
	}
}

func assertWatchChanRecvs(t *testing.T, ch <-chan *ConfigSnapshot, expect *ConfigSnapshot) {
	t.Helper()

	select {
	case got, ok := <-ch:
		require.Equal(t, expect, got)
		if expect == nil {
			require.False(t, ok, "watch chan should be closed")
		}
	case <-time.After(50*time.Millisecond + coalesceTimeout):
		t.Fatal("recv timeout")
	}
}

func TestManager_deliverLatest(t *testing.T) {
	// None of these need to do anything for this test; they just need to be
	// valid.
	logger := log.New(os.Stderr, "", log.LstdFlags)
	cfg := ManagerConfig{
		Cache: cache.New(nil),
		State: local.NewState(local.Config{}, logger, &token.Store{}),
		Source: &structs.QuerySource{
			Node:       "node1",
			Datacenter: "dc1",
		},
		Logger: logger,
	}
	require := require.New(t)

	m, err := NewManager(cfg)
	require.NoError(err)

	snap1 := &ConfigSnapshot{
		ProxyID: "test-proxy",
		Port:    1111,
	}
	snap2 := &ConfigSnapshot{
		ProxyID: "test-proxy",
		Port:    2222,
	}

	// Put an overall time limit on this test case so we don't have to guard
	// every call to ensure the whole test doesn't deadlock.
	time.AfterFunc(100*time.Millisecond, func() {
		t.Fatal("test timed out")
	})

	// Test with a 1-buffered chan
	ch1 := make(chan *ConfigSnapshot, 1)

	// Sending to an unblocked chan should work
	m.deliverLatest(snap1, ch1)

	// Check it was delivered
	require.Equal(snap1, <-ch1)

	// Now send both without reading, simulating a slow client
	m.deliverLatest(snap1, ch1)
	m.deliverLatest(snap2, ch1)

	// Check we got the _second_ one
	require.Equal(snap2, <-ch1)

	// Same again for a 5-buffered chan
	ch5 := make(chan *ConfigSnapshot, 5)

	// Sending to an unblocked chan should work
	m.deliverLatest(snap1, ch5)

	// Check it was delivered
	require.Equal(snap1, <-ch5)

	// Now send enough to fill the chan, simulating a slow client
	for i := 0; i < 5; i++ {
		m.deliverLatest(snap1, ch5)
	}
	m.deliverLatest(snap2, ch5)

	// Check we got the _second_ one
	require.Equal(snap2, <-ch5)
}
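
// TestManager_deliverLatest pins down the contract of the Manager's
// deliverLatest method: a send must never block the Manager, and a slow
// watcher must see the newest snapshot rather than a stale backlog. The
// sketch below shows one way to satisfy that contract, assuming a single
// sender (the Manager serializes delivery); it is an illustration for this
// test, not Consul's actual implementation in manager.go.
func deliverLatestSketch(snap *ConfigSnapshot, ch chan *ConfigSnapshot) {
	// Fast path: there is room in the buffer, deliver immediately.
	select {
	case ch <- snap:
		return
	default:
	}

	// Slow path: the watcher is behind. Discard every queued (stale) snapshot
	// so the next receive yields the newest one, then send it. With a single
	// sender the buffer cannot refill underneath us, so the send succeeds.
	for {
		select {
		case <-ch:
		default:
			ch <- snap
			return
		}
	}
}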