github.com/ferranbt/nomad@v0.9.3-0.20190607002617-85c449b7667c/client/client_test.go

     1  package client
     2  
     3  import (
     4  	"fmt"
     5  	"io/ioutil"
     6  	"os"
     7  	"path/filepath"
     8  	"runtime"
     9  	"sort"
    10  	"testing"
    11  	"time"
    12  
    13  	"github.com/hashicorp/go-memdb"
    14  	"github.com/hashicorp/nomad/client/config"
    15  	consulApi "github.com/hashicorp/nomad/client/consul"
    16  	"github.com/hashicorp/nomad/client/fingerprint"
    17  	"github.com/hashicorp/nomad/command/agent/consul"
    18  	"github.com/hashicorp/nomad/helper/pluginutils/catalog"
    19  	"github.com/hashicorp/nomad/helper/testlog"
    20  	"github.com/hashicorp/nomad/helper/uuid"
    21  	"github.com/hashicorp/nomad/nomad"
    22  	"github.com/hashicorp/nomad/nomad/mock"
    23  	"github.com/hashicorp/nomad/nomad/structs"
    24  	nconfig "github.com/hashicorp/nomad/nomad/structs/config"
    25  	"github.com/hashicorp/nomad/plugins/device"
    26  	psstructs "github.com/hashicorp/nomad/plugins/shared/structs"
    27  	"github.com/hashicorp/nomad/testutil"
    28  	"github.com/stretchr/testify/assert"
    29  
    30  	"github.com/hashicorp/go-hclog"
    31  	cstate "github.com/hashicorp/nomad/client/state"
    32  	ctestutil "github.com/hashicorp/nomad/client/testutil"
    33  	"github.com/stretchr/testify/require"
    34  )
    35  
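        // testACLServer starts an in-memory test server with ACLs enabled and returns
        // the server, its RPC address, and the bootstrap ACL token.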
    36  func testACLServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string, *structs.ACLToken) {
    37  	server, token := nomad.TestACLServer(t, cb)
    38  	return server, server.GetConfig().RPCAddr.String(), token
    39  }
    40  
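        // testServer starts an in-memory test server and returns it along with its
        // RPC address.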
    41  func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string) {
    42  	server := nomad.TestServer(t, cb)
    43  	return server, server.GetConfig().RPCAddr.String()
    44  }
    45  
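        // TestClient_StartStop asserts that a client can be created and shut down cleanly.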
    46  func TestClient_StartStop(t *testing.T) {
    47  	t.Parallel()
    48  	client, cleanup := TestClient(t, nil)
    49  	defer cleanup()
    50  	if err := client.Shutdown(); err != nil {
    51  		t.Fatalf("err: %v", err)
    52  	}
    53  }
    54  
    55  // Certain labels for metrics are dependent on the client's initial setup. This
    56  // tests that the client has properly initialized before we assign values to labels.
    57  func TestClient_BaseLabels(t *testing.T) {
    58  	t.Parallel()
    59  	assert := assert.New(t)
    60  
    61  	client, cleanup := TestClient(t, nil)
    62  	if err := client.Shutdown(); err != nil {
    63  		t.Fatalf("err: %v", err)
    64  	}
    65  	defer cleanup()
    66  
    67  	// directly invoke this function, as otherwise this will fail on a CI build
    68  	// due to a race condition
    69  	client.emitStats()
    70  
    71  	baseLabels := client.baseLabels
    72  	assert.NotEqual(0, len(baseLabels))
    73  
    74  	nodeID := client.Node().ID
    75  	for _, e := range baseLabels {
    76  		if e.Name == "node_id" {
    77  			assert.Equal(nodeID, e.Value)
    78  		}
    79  	}
    80  }
    81  
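        // TestClient_RPC asserts that a client can reach a server over its RPC address.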
    82  func TestClient_RPC(t *testing.T) {
    83  	t.Parallel()
    84  	s1, addr := testServer(t, nil)
    85  	defer s1.Shutdown()
    86  
    87  	c1, cleanup := TestClient(t, func(c *config.Config) {
    88  		c.Servers = []string{addr}
    89  	})
    90  	defer cleanup()
    91  
    92  	// RPC should succeed
    93  	testutil.WaitForResult(func() (bool, error) {
    94  		var out struct{}
    95  		err := c1.RPC("Status.Ping", struct{}{}, &out)
    96  		return err == nil, err
    97  	}, func(err error) {
    98  		t.Fatalf("err: %v", err)
    99  	})
   100  }
   101  
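        // TestClient_RPC_FireRetryWatchers asserts that the client's RPC retry watcher
        // channel is fired once the client can successfully reach a server.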
   102  func TestClient_RPC_FireRetryWatchers(t *testing.T) {
   103  	t.Parallel()
   104  	s1, addr := testServer(t, nil)
   105  	defer s1.Shutdown()
   106  
   107  	c1, cleanup := TestClient(t, func(c *config.Config) {
   108  		c.Servers = []string{addr}
   109  	})
   110  	defer cleanup()
   111  
   112  	watcher := c1.rpcRetryWatcher()
   113  
   114  	// RPC should succeed
   115  	testutil.WaitForResult(func() (bool, error) {
   116  		var out struct{}
   117  		err := c1.RPC("Status.Ping", struct{}{}, &out)
   118  		return err == nil, err
   119  	}, func(err error) {
   120  		t.Fatalf("err: %v", err)
   121  	})
   122  
   123  	select {
   124  	case <-watcher:
   125  	default:
   126  		t.Fatal("watcher should be fired")
   127  	}
   128  }
   129  
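        // TestClient_RPC_Passthrough asserts that RPCs succeed when the client is wired
        // directly to the server's in-process RPC handler.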
   130  func TestClient_RPC_Passthrough(t *testing.T) {
   131  	t.Parallel()
   132  	s1, _ := testServer(t, nil)
   133  	defer s1.Shutdown()
   134  
   135  	c1, cleanup := TestClient(t, func(c *config.Config) {
   136  		c.RPCHandler = s1
   137  	})
   138  	defer cleanup()
   139  
   140  	// RPC should succeed
   141  	testutil.WaitForResult(func() (bool, error) {
   142  		var out struct{}
   143  		err := c1.RPC("Status.Ping", struct{}{}, &out)
   144  		return err == nil, err
   145  	}, func(err error) {
   146  		t.Fatalf("err: %v", err)
   147  	})
   148  }
   149  
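        // TestClient_Fingerprint asserts that base fingerprinters populate node
        // attributes such as kernel.name and cpu.arch.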
   150  func TestClient_Fingerprint(t *testing.T) {
   151  	t.Parallel()
   152  
   153  	c, cleanup := TestClient(t, nil)
   154  	defer cleanup()
   155  
   156  	// Ensure we are fingerprinting
   157  	testutil.WaitForResult(func() (bool, error) {
   158  		node := c.Node()
   159  		if _, ok := node.Attributes["kernel.name"]; !ok {
   160  			return false, fmt.Errorf("Expected value for kernel.name")
   161  		}
   162  		if _, ok := node.Attributes["cpu.arch"]; !ok {
   163  			return false, fmt.Errorf("Expected value for cpu.arch")
   164  		}
   165  		return true, nil
   166  	}, func(err error) {
   167  		t.Fatalf("err: %v", err)
   168  	})
   169  }
   170  
   171  // TestClient_Fingerprint_Periodic asserts that driver node attributes are
   172  // periodically fingerprinted.
   173  func TestClient_Fingerprint_Periodic(t *testing.T) {
   174  	t.Parallel()
   175  
   176  	c1, cleanup := TestClient(t, func(c *config.Config) {
   177  		confs := []*nconfig.PluginConfig{
   178  			{
   179  				Name: "mock_driver",
   180  				Config: map[string]interface{}{
   181  					"shutdown_periodic_after":    true,
   182  					"shutdown_periodic_duration": time.Second,
   183  				},
   184  			},
   185  		}
   186  		c.PluginLoader = catalog.TestPluginLoaderWithOptions(t, "", nil, confs)
   187  	})
   188  	defer cleanup()
   189  
   190  	node := c1.config.Node
   191  	{
   192  		// Ensure the mock driver is registered on the client
   193  		testutil.WaitForResult(func() (bool, error) {
   194  			c1.configLock.Lock()
   195  			defer c1.configLock.Unlock()
   196  
   197  			// assert that the driver is set on the node attributes
   198  			mockDriverInfoAttr := node.Attributes["driver.mock_driver"]
   199  			if mockDriverInfoAttr == "" {
   200  				return false, fmt.Errorf("mock driver is empty when it should be set on the node attributes")
   201  			}
   202  
   203  			mockDriverInfo := node.Drivers["mock_driver"]
   204  
   205  			// assert that the Driver information for the node is also set correctly
   206  			if mockDriverInfo == nil {
   207  				return false, fmt.Errorf("mock driver is nil when it should be set on node Drivers")
   208  			}
   209  			if !mockDriverInfo.Detected {
   210  				return false, fmt.Errorf("mock driver should be set as detected")
   211  			}
   212  			if !mockDriverInfo.Healthy {
   213  				return false, fmt.Errorf("mock driver should be set as healthy")
   214  			}
   215  			if mockDriverInfo.HealthDescription == "" {
   216  				return false, fmt.Errorf("mock driver description should not be empty")
   217  			}
   218  			return true, nil
   219  		}, func(err error) {
   220  			t.Fatalf("err: %v", err)
   221  		})
   222  	}
   223  
   224  	{
   225  		testutil.WaitForResult(func() (bool, error) {
   226  			c1.configLock.Lock()
   227  			defer c1.configLock.Unlock()
   228  			mockDriverInfo := node.Drivers["mock_driver"]
   229  			// assert that the Driver information for the node is also set correctly
   230  			if mockDriverInfo == nil {
   231  				return false, fmt.Errorf("mock driver is nil when it should be set on node Drivers")
   232  			}
   233  			if mockDriverInfo.Detected {
   234  				return false, fmt.Errorf("mock driver should not be set as detected")
   235  			}
   236  			if mockDriverInfo.Healthy {
   237  				return false, fmt.Errorf("mock driver should not be set as healthy")
   238  			}
   239  			if mockDriverInfo.HealthDescription == "" {
   240  				return false, fmt.Errorf("mock driver description should not be empty")
   241  			}
   242  			return true, nil
   243  		}, func(err error) {
   244  			t.Fatalf("err: %v", err)
   245  		})
   246  	}
   247  }
   248  
   249  // TestClient_MixedTLS asserts that when a server is running with TLS enabled
   250  // it will reject any RPC connections from clients that lack TLS. See #2525
   251  func TestClient_MixedTLS(t *testing.T) {
   252  	t.Parallel()
   253  	const (
   254  		cafile  = "../helper/tlsutil/testdata/ca.pem"
   255  		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
   256  		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
   257  	)
   258  	s1, addr := testServer(t, func(c *nomad.Config) {
   259  		c.TLSConfig = &nconfig.TLSConfig{
   260  			EnableHTTP:           true,
   261  			EnableRPC:            true,
   262  			VerifyServerHostname: true,
   263  			CAFile:               cafile,
   264  			CertFile:             foocert,
   265  			KeyFile:              fookey,
   266  		}
   267  	})
   268  	defer s1.Shutdown()
   269  	testutil.WaitForLeader(t, s1.RPC)
   270  
   271  	c1, cleanup := TestClient(t, func(c *config.Config) {
   272  		c.Servers = []string{addr}
   273  	})
   274  	defer cleanup()
   275  
   276  	req := structs.NodeSpecificRequest{
   277  		NodeID:       c1.Node().ID,
   278  		QueryOptions: structs.QueryOptions{Region: "global"},
   279  	}
   280  	var out structs.SingleNodeResponse
   281  	testutil.AssertUntil(100*time.Millisecond,
   282  		func() (bool, error) {
   283  			err := c1.RPC("Node.GetNode", &req, &out)
   284  			if err == nil {
   285  				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
   286  			}
   287  			return true, nil
   288  		},
   289  		func(err error) {
   290  			t.Fatal(err)
   291  		},
   292  	)
   293  }
   294  
   295  // TestClient_BadTLS asserts that when a client and server are running with TLS
   296  // enabled -- but their certificates are signed by different CAs -- they're
   297  // unable to communicate.
   298  func TestClient_BadTLS(t *testing.T) {
   299  	t.Parallel()
   300  	const (
   301  		cafile  = "../helper/tlsutil/testdata/ca.pem"
   302  		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
   303  		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
   304  		badca   = "../helper/tlsutil/testdata/ca-bad.pem"
   305  		badcert = "../helper/tlsutil/testdata/nomad-bad.pem"
   306  		badkey  = "../helper/tlsutil/testdata/nomad-bad-key.pem"
   307  	)
   308  	s1, addr := testServer(t, func(c *nomad.Config) {
   309  		c.TLSConfig = &nconfig.TLSConfig{
   310  			EnableHTTP:           true,
   311  			EnableRPC:            true,
   312  			VerifyServerHostname: true,
   313  			CAFile:               cafile,
   314  			CertFile:             foocert,
   315  			KeyFile:              fookey,
   316  		}
   317  	})
   318  	defer s1.Shutdown()
   319  	testutil.WaitForLeader(t, s1.RPC)
   320  
   321  	c1, cleanup := TestClient(t, func(c *config.Config) {
   322  		c.Servers = []string{addr}
   323  		c.TLSConfig = &nconfig.TLSConfig{
   324  			EnableHTTP:           true,
   325  			EnableRPC:            true,
   326  			VerifyServerHostname: true,
   327  			CAFile:               badca,
   328  			CertFile:             badcert,
   329  			KeyFile:              badkey,
   330  		}
   331  	})
   332  	defer cleanup()
   333  
   334  	req := structs.NodeSpecificRequest{
   335  		NodeID:       c1.Node().ID,
   336  		QueryOptions: structs.QueryOptions{Region: "global"},
   337  	}
   338  	var out structs.SingleNodeResponse
   339  	testutil.AssertUntil(100*time.Millisecond,
   340  		func() (bool, error) {
   341  			err := c1.RPC("Node.GetNode", &req, &out)
   342  			if err == nil {
   343  				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
   344  			}
   345  			return true, nil
   346  		},
   347  		func(err error) {
   348  			t.Fatal(err)
   349  		},
   350  	)
   351  }
   352  
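        // TestClient_Register asserts that a client registers itself with the server
        // and can be fetched by its node ID.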
   353  func TestClient_Register(t *testing.T) {
   354  	t.Parallel()
   355  	s1, _ := testServer(t, nil)
   356  	defer s1.Shutdown()
   357  	testutil.WaitForLeader(t, s1.RPC)
   358  
   359  	c1, cleanup := TestClient(t, func(c *config.Config) {
   360  		c.RPCHandler = s1
   361  	})
   362  	defer cleanup()
   363  
   364  	req := structs.NodeSpecificRequest{
   365  		NodeID:       c1.Node().ID,
   366  		QueryOptions: structs.QueryOptions{Region: "global"},
   367  	}
   368  	var out structs.SingleNodeResponse
   369  
   370  	// Register should succeed
   371  	testutil.WaitForResult(func() (bool, error) {
   372  		err := s1.RPC("Node.GetNode", &req, &out)
   373  		if err != nil {
   374  			return false, err
   375  		}
   376  		if out.Node == nil {
   377  			return false, fmt.Errorf("missing reg")
   378  		}
   379  		return out.Node.ID == req.NodeID, nil
   380  	}, func(err error) {
   381  		t.Fatalf("err: %v", err)
   382  	})
   383  }
   384  
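        // TestClient_Heartbeat asserts that client heartbeats move the node to the
        // ready status on the server.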
   385  func TestClient_Heartbeat(t *testing.T) {
   386  	t.Parallel()
   387  	s1, _ := testServer(t, func(c *nomad.Config) {
   388  		c.MinHeartbeatTTL = 50 * time.Millisecond
   389  	})
   390  	defer s1.Shutdown()
   391  	testutil.WaitForLeader(t, s1.RPC)
   392  
   393  	c1, cleanup := TestClient(t, func(c *config.Config) {
   394  		c.RPCHandler = s1
   395  	})
   396  	defer cleanup()
   397  
   398  	req := structs.NodeSpecificRequest{
   399  		NodeID:       c1.Node().ID,
   400  		QueryOptions: structs.QueryOptions{Region: "global"},
   401  	}
   402  	var out structs.SingleNodeResponse
   403  
   404  	// Register should succeed
   405  	testutil.WaitForResult(func() (bool, error) {
   406  		err := s1.RPC("Node.GetNode", &req, &out)
   407  		if err != nil {
   408  			return false, err
   409  		}
   410  		if out.Node == nil {
   411  			return false, fmt.Errorf("missing reg")
   412  		}
   413  		return out.Node.Status == structs.NodeStatusReady, nil
   414  	}, func(err error) {
   415  		t.Fatalf("err: %v", err)
   416  	})
   417  }
   418  
   419  // TestClient_UpdateAllocStatus asserts that, once running, allocations send
   420  // updates to the server.
   421  func TestClient_UpdateAllocStatus(t *testing.T) {
   422  	t.Parallel()
   423  	s1, _ := testServer(t, nil)
   424  	defer s1.Shutdown()
   425  
   426  	_, cleanup := TestClient(t, func(c *config.Config) {
   427  		c.RPCHandler = s1
   428  	})
   429  	defer cleanup()
   430  
   431  	job := mock.Job()
   432  	// allow running the job on any node, including the self client, which may not be a Linux box
   433  	job.Constraints = nil
   434  	job.TaskGroups[0].Count = 1
   435  	task := job.TaskGroups[0].Tasks[0]
   436  	task.Driver = "mock_driver"
   437  	task.Config = map[string]interface{}{
   438  		"run_for": "10s",
   439  	}
   440  	task.Services = nil
   441  
   442  	// WaitForRunning polls the server until the ClientStatus is running
   443  	testutil.WaitForRunning(t, s1.RPC, job)
   444  }
   445  
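        // TestClient_WatchAllocs asserts that the client tracks allocations assigned to
        // it and reacts to server-side updates and deletions.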
   446  func TestClient_WatchAllocs(t *testing.T) {
   447  	t.Parallel()
   448  	ctestutil.ExecCompatible(t)
   449  	s1, _ := testServer(t, nil)
   450  	defer s1.Shutdown()
   451  	testutil.WaitForLeader(t, s1.RPC)
   452  
   453  	c1, cleanup := TestClient(t, func(c *config.Config) {
   454  		c.RPCHandler = s1
   455  	})
   456  	defer cleanup()
   457  
   458  	// Wait until the node is ready
   459  	waitTilNodeReady(c1, t)
   460  
   461  	// Create mock allocations
   462  	job := mock.Job()
   463  	alloc1 := mock.Alloc()
   464  	alloc1.JobID = job.ID
   465  	alloc1.Job = job
   466  	alloc1.NodeID = c1.Node().ID
   467  	alloc2 := mock.Alloc()
   468  	alloc2.NodeID = c1.Node().ID
   469  	alloc2.JobID = job.ID
   470  	alloc2.Job = job
   471  
   472  	state := s1.State()
   473  	if err := state.UpsertJob(100, job); err != nil {
   474  		t.Fatal(err)
   475  	}
   476  	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
   477  		t.Fatal(err)
   478  	}
   479  	err := state.UpsertAllocs(102, []*structs.Allocation{alloc1, alloc2})
   480  	if err != nil {
   481  		t.Fatalf("err: %v", err)
   482  	}
   483  
   484  	// Both allocations should get registered
   485  	testutil.WaitForResult(func() (bool, error) {
   486  		c1.allocLock.RLock()
   487  		num := len(c1.allocs)
   488  		c1.allocLock.RUnlock()
   489  		return num == 2, nil
   490  	}, func(err error) {
   491  		t.Fatalf("err: %v", err)
   492  	})
   493  
   494  	// Delete one allocation
   495  	if err := state.DeleteEval(103, nil, []string{alloc1.ID}); err != nil {
   496  		t.Fatalf("err: %v", err)
   497  	}
   498  
   499  	// Update the other allocation. Have to make a copy because the allocs are
   500  	// shared in memory in the test and the modify index would be updated in the
   501  	// alloc runner.
   502  	alloc2_2 := alloc2.Copy()
   503  	alloc2_2.DesiredStatus = structs.AllocDesiredStatusStop
   504  	if err := state.UpsertAllocs(104, []*structs.Allocation{alloc2_2}); err != nil {
   505  		t.Fatalf("err upserting stopped alloc: %v", err)
   506  	}
   507  
   508  	// One allocation should get GC'd and removed
   509  	testutil.WaitForResult(func() (bool, error) {
   510  		c1.allocLock.RLock()
   511  		num := len(c1.allocs)
   512  		c1.allocLock.RUnlock()
   513  		return num == 1, nil
   514  	}, func(err error) {
   515  		t.Fatalf("err: %v", err)
   516  	})
   517  
   518  	// One allocation should get updated
   519  	testutil.WaitForResult(func() (bool, error) {
   520  		c1.allocLock.RLock()
   521  		ar := c1.allocs[alloc2.ID]
   522  		c1.allocLock.RUnlock()
   523  		return ar.Alloc().DesiredStatus == structs.AllocDesiredStatusStop, nil
   524  	}, func(err error) {
   525  		t.Fatalf("err: %v", err)
   526  	})
   527  }
   528  
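        // waitTilNodeReady blocks until the client's node reaches the ready status,
        // failing the test on timeout.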
   529  func waitTilNodeReady(client *Client, t *testing.T) {
   530  	testutil.WaitForResult(func() (bool, error) {
   531  		n := client.Node()
   532  		if n.Status != structs.NodeStatusReady {
   533  			return false, fmt.Errorf("node not registered")
   534  		}
   535  		return true, nil
   536  	}, func(err error) {
   537  		t.Fatalf("err: %v", err)
   538  	})
   539  }
   540  
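        // TestClient_SaveRestoreState asserts that a non-dev-mode client persists
        // allocation state and restores its alloc runners after a restart.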
   541  func TestClient_SaveRestoreState(t *testing.T) {
   542  	t.Parallel()
   543  
   544  	s1, _ := testServer(t, nil)
   545  	defer s1.Shutdown()
   546  	testutil.WaitForLeader(t, s1.RPC)
   547  
   548  	c1, cleanup := TestClient(t, func(c *config.Config) {
   549  		c.DevMode = false
   550  		c.RPCHandler = s1
   551  	})
   552  	defer cleanup()
   553  
   554  	// Wait until the node is ready
   555  	waitTilNodeReady(c1, t)
   556  
   557  	// Create mock allocations
   558  	job := mock.Job()
   559  	alloc1 := mock.Alloc()
   560  	alloc1.NodeID = c1.Node().ID
   561  	alloc1.Job = job
   562  	alloc1.JobID = job.ID
   563  	alloc1.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
   564  	alloc1.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   565  		"run_for": "10s",
   566  	}
   567  	alloc1.ClientStatus = structs.AllocClientStatusRunning
   568  
   569  	state := s1.State()
   570  	if err := state.UpsertJob(100, job); err != nil {
   571  		t.Fatal(err)
   572  	}
   573  	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
   574  		t.Fatal(err)
   575  	}
   576  	if err := state.UpsertAllocs(102, []*structs.Allocation{alloc1}); err != nil {
   577  		t.Fatalf("err: %v", err)
   578  	}
   579  
   580  	// Allocations should get registered
   581  	testutil.WaitForResult(func() (bool, error) {
   582  		c1.allocLock.RLock()
   583  		ar := c1.allocs[alloc1.ID]
   584  		c1.allocLock.RUnlock()
   585  		if ar == nil {
   586  			return false, fmt.Errorf("nil alloc runner")
   587  		}
   588  		if ar.Alloc().ClientStatus != structs.AllocClientStatusRunning {
   589  			return false, fmt.Errorf("client status: got %v; want %v", ar.Alloc().ClientStatus, structs.AllocClientStatusRunning)
   590  		}
   591  		return true, nil
   592  	}, func(err error) {
   593  		t.Fatalf("err: %v", err)
   594  	})
   595  
   596  	// Shutdown the client, which saves its state
   597  	if err := c1.Shutdown(); err != nil {
   598  		t.Fatalf("err: %v", err)
   599  	}
   600  
   601  	// Create a new client
   602  	logger := testlog.HCLogger(t)
   603  	c1.config.Logger = logger
   604  	catalog := consul.NewMockCatalog(logger)
   605  	mockService := consulApi.NewMockConsulServiceClient(t, logger)
   606  
   607  	c2, err := NewClient(c1.config, catalog, mockService)
   608  	if err != nil {
   609  		t.Fatalf("err: %v", err)
   610  	}
   611  	defer c2.Shutdown()
   612  
   613  	// Ensure the allocation is running
   614  	testutil.WaitForResult(func() (bool, error) {
   615  		c2.allocLock.RLock()
   616  		ar := c2.allocs[alloc1.ID]
   617  		c2.allocLock.RUnlock()
   618  		status := ar.Alloc().ClientStatus
   619  		alive := status == structs.AllocClientStatusRunning || status == structs.AllocClientStatusPending
   620  		if !alive {
   621  			return false, fmt.Errorf("incorrect client status: %#v", ar.Alloc())
   622  		}
   623  		return true, nil
   624  	}, func(err error) {
   625  		t.Fatalf("err: %v", err)
   626  	})
   627  
   628  	// Destroy all the allocations
   629  	for _, ar := range c2.getAllocRunners() {
   630  		ar.Destroy()
   631  	}
   632  
   633  	for _, ar := range c2.getAllocRunners() {
   634  		<-ar.DestroyCh()
   635  	}
   636  }
   637  
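        // TestClient_RestoreError asserts that allocations which fail to restore from
        // the client state DB are marked as failed on the server.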
   638  func TestClient_RestoreError(t *testing.T) {
   639  	t.Parallel()
   640  	require := require.New(t)
   641  
   642  	s1, _ := testServer(t, nil)
   643  	defer s1.Shutdown()
   644  	testutil.WaitForLeader(t, s1.RPC)
   645  
   646  	c1, cleanup := TestClient(t, func(c *config.Config) {
   647  		c.DevMode = false
   648  		c.RPCHandler = s1
   649  	})
   650  	defer cleanup()
   651  
   652  	// Wait until the node is ready
   653  	waitTilNodeReady(c1, t)
   654  
   655  	// Create mock allocations
   656  	job := mock.Job()
   657  	alloc1 := mock.Alloc()
   658  	alloc1.NodeID = c1.Node().ID
   659  	alloc1.Job = job
   660  	alloc1.JobID = job.ID
   661  	alloc1.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
   662  	alloc1.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   663  		"run_for": "10s",
   664  	}
   665  	alloc1.ClientStatus = structs.AllocClientStatusRunning
   666  
   667  	state := s1.State()
   668  	err := state.UpsertJob(100, job)
   669  	require.Nil(err)
   670  
   671  	err = state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID))
   672  	require.Nil(err)
   673  
   674  	err = state.UpsertAllocs(102, []*structs.Allocation{alloc1})
   675  	require.Nil(err)
   676  
   677  	// Allocations should get registered
   678  	testutil.WaitForResult(func() (bool, error) {
   679  		c1.allocLock.RLock()
   680  		ar := c1.allocs[alloc1.ID]
   681  		c1.allocLock.RUnlock()
   682  		if ar == nil {
   683  			return false, fmt.Errorf("nil alloc runner")
   684  		}
   685  		if ar.Alloc().ClientStatus != structs.AllocClientStatusRunning {
   686  			return false, fmt.Errorf("client status: got %v; want %v", ar.Alloc().ClientStatus, structs.AllocClientStatusRunning)
   687  		}
   688  		return true, nil
   689  	}, func(err error) {
   690  		t.Fatalf("err: %v", err)
   691  	})
   692  
   693  	// Shutdown the client, which saves its state
   694  	if err := c1.Shutdown(); err != nil {
   695  		t.Fatalf("err: %v", err)
   696  	}
   697  
   698  	// Create a new client with a stateDB implementation that errors
   699  	logger := testlog.HCLogger(t)
   700  	c1.config.Logger = logger
   701  	catalog := consul.NewMockCatalog(logger)
   702  	mockService := consulApi.NewMockConsulServiceClient(t, logger)
   703  
   704  	// This stateDB returns errors for all methods called by restore
   705  	stateDBFunc := func(hclog.Logger, string) (cstate.StateDB, error) {
   706  		return &cstate.ErrDB{Allocs: []*structs.Allocation{alloc1}}, nil
   707  	}
   708  	c1.config.StateDBFactory = stateDBFunc
   709  
   710  	c2, err := NewClient(c1.config, catalog, mockService)
   711  	require.Nil(err)
   712  	defer c2.Shutdown()
   713  
   714  	// Ensure the allocation has been marked as failed on the server
   715  	testutil.WaitForResult(func() (bool, error) {
   716  		alloc, err := s1.State().AllocByID(nil, alloc1.ID)
   717  		require.Nil(err)
   718  		failed := alloc.ClientStatus == structs.AllocClientStatusFailed
   719  		if !failed {
   720  			return false, fmt.Errorf("Expected failed client status, but got %v", alloc.ClientStatus)
   721  		}
   722  		return true, nil
   723  	}, func(err error) {
   724  		require.NoError(err)
   725  	})
   726  
   727  }
   728  
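        // TestClient_AddAllocError asserts that an allocation whose alloc runner cannot
        // be created (nil resources) is tracked as invalid and marked failed on the server.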
   729  func TestClient_AddAllocError(t *testing.T) {
   730  	t.Parallel()
   731  	require := require.New(t)
   732  
   733  	s1, _ := testServer(t, nil)
   734  	defer s1.Shutdown()
   735  	testutil.WaitForLeader(t, s1.RPC)
   736  
   737  	c1, cleanup := TestClient(t, func(c *config.Config) {
   738  		c.DevMode = false
   739  		c.RPCHandler = s1
   740  	})
   741  	defer cleanup()
   742  
   743  	// Wait until the node is ready
   744  	waitTilNodeReady(c1, t)
   745  
   746  	// Create mock allocation with invalid task group name
   747  	job := mock.Job()
   748  	alloc1 := mock.Alloc()
   749  	alloc1.NodeID = c1.Node().ID
   750  	alloc1.Job = job
   751  	alloc1.JobID = job.ID
   752  	alloc1.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
   753  	alloc1.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   754  		"run_for": "10s",
   755  	}
   756  	alloc1.ClientStatus = structs.AllocClientStatusPending
   757  
   758  	// Set these two fields to nil to cause alloc runner creation to fail
   759  	alloc1.AllocatedResources = nil
   760  	alloc1.TaskResources = nil
   761  
   762  	state := s1.State()
   763  	err := state.UpsertJob(100, job)
   764  	require.Nil(err)
   765  
   766  	err = state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID))
   767  	require.Nil(err)
   768  
   769  	err = state.UpsertAllocs(102, []*structs.Allocation{alloc1})
   770  	require.Nil(err)
   771  
   772  	// Push this alloc update to the client
   773  	allocUpdates := &allocUpdates{
   774  		pulled: map[string]*structs.Allocation{
   775  			alloc1.ID: alloc1,
   776  		},
   777  	}
   778  	c1.runAllocs(allocUpdates)
   779  
   780  	// Ensure the allocation has been marked as invalid and failed on the server
   781  	testutil.WaitForResult(func() (bool, error) {
   782  		c1.allocLock.RLock()
   783  		ar := c1.allocs[alloc1.ID]
   784  		_, isInvalid := c1.invalidAllocs[alloc1.ID]
   785  		c1.allocLock.RUnlock()
   786  		if ar != nil {
   787  			return false, fmt.Errorf("expected nil alloc runner")
   788  		}
   789  		if !isInvalid {
   790  			return false, fmt.Errorf("expected alloc to be marked as invalid")
   791  		}
   792  		alloc, err := s1.State().AllocByID(nil, alloc1.ID)
   793  		require.Nil(err)
   794  		failed := alloc.ClientStatus == structs.AllocClientStatusFailed
   795  		if !failed {
   796  			return false, fmt.Errorf("Expected failed client status, but got %v", alloc.ClientStatus)
   797  		}
   798  		return true, nil
   799  	}, func(err error) {
   800  		require.NoError(err)
   801  	})
   802  
   803  }
   804  
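        // TestClient_Init asserts that client initialization creates the allocation directory.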
   805  func TestClient_Init(t *testing.T) {
   806  	t.Parallel()
   807  	dir, err := ioutil.TempDir("", "nomad")
   808  	if err != nil {
   809  		t.Fatalf("err: %s", err)
   810  	}
   811  	defer os.RemoveAll(dir)
   812  	allocDir := filepath.Join(dir, "alloc")
   813  
   814  	client := &Client{
   815  		config: &config.Config{
   816  			AllocDir:       allocDir,
   817  			StateDBFactory: cstate.GetStateDBFactory(true),
   818  		},
   819  		logger: testlog.HCLogger(t),
   820  	}
   821  
   822  	if err := client.init(); err != nil {
   823  		t.Fatalf("err: %s", err)
   824  	}
   825  
   826  	if _, err := os.Stat(allocDir); err != nil {
   827  		t.Fatalf("err: %s", err)
   828  	}
   829  }
   830  
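        // TestClient_BlockedAllocations asserts that an allocation chained to a
        // previous allocation blocks until its predecessor stops.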
   831  func TestClient_BlockedAllocations(t *testing.T) {
   832  	t.Parallel()
   833  	s1, _ := testServer(t, nil)
   834  	defer s1.Shutdown()
   835  	testutil.WaitForLeader(t, s1.RPC)
   836  
   837  	c1, cleanup := TestClient(t, func(c *config.Config) {
   838  		c.RPCHandler = s1
   839  	})
   840  	defer cleanup()
   841  
   842  	// Wait for the node to be ready
   843  	state := s1.State()
   844  	testutil.WaitForResult(func() (bool, error) {
   845  		ws := memdb.NewWatchSet()
   846  		out, err := state.NodeByID(ws, c1.Node().ID)
   847  		if err != nil {
   848  			return false, err
   849  		}
   850  		if out == nil || out.Status != structs.NodeStatusReady {
   851  			return false, fmt.Errorf("bad node: %#v", out)
   852  		}
   853  		return true, nil
   854  	}, func(err error) {
   855  		t.Fatalf("err: %v", err)
   856  	})
   857  
   858  	// Add an allocation
   859  	alloc := mock.Alloc()
   860  	alloc.NodeID = c1.Node().ID
   861  	alloc.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
   862  	alloc.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   863  		"kill_after":  "1s",
   864  		"run_for":     "100s",
   865  		"exit_code":   0,
   866  		"exit_signal": 0,
   867  	}
   868  
   869  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
   870  	state.UpsertAllocs(100, []*structs.Allocation{alloc})
   871  
   872  	// Wait until the client downloads and starts the allocation
   873  	testutil.WaitForResult(func() (bool, error) {
   874  		ws := memdb.NewWatchSet()
   875  		out, err := state.AllocByID(ws, alloc.ID)
   876  		if err != nil {
   877  			return false, err
   878  		}
   879  		if out == nil || out.ClientStatus != structs.AllocClientStatusRunning {
   880  			return false, fmt.Errorf("bad alloc: %#v", out)
   881  		}
   882  		return true, nil
   883  	}, func(err error) {
   884  		t.Fatalf("err: %v", err)
   885  	})
   886  
   887  	// Add a new chained alloc
   888  	alloc2 := alloc.Copy()
   889  	alloc2.ID = uuid.Generate()
   890  	alloc2.Job = alloc.Job
   891  	alloc2.JobID = alloc.JobID
   892  	alloc2.PreviousAllocation = alloc.ID
   893  	if err := state.UpsertAllocs(200, []*structs.Allocation{alloc2}); err != nil {
   894  		t.Fatalf("err: %v", err)
   895  	}
   896  
   897  	// Ensure that the chained allocation is being tracked as blocked
   898  	testutil.WaitForResult(func() (bool, error) {
   899  		ar := c1.getAllocRunners()[alloc2.ID]
   900  		if ar == nil {
   901  			return false, fmt.Errorf("alloc 2's alloc runner does not exist")
   902  		}
   903  		if !ar.IsWaiting() {
   904  			return false, fmt.Errorf("alloc 2 is not blocked")
   905  		}
   906  		return true, nil
   907  	}, func(err error) {
   908  		t.Fatalf("err: %v", err)
   909  	})
   910  
   911  	// Change the desired state of the parent alloc to stop
   912  	alloc1 := alloc.Copy()
   913  	alloc1.DesiredStatus = structs.AllocDesiredStatusStop
   914  	if err := state.UpsertAllocs(300, []*structs.Allocation{alloc1}); err != nil {
   915  		t.Fatalf("err: %v", err)
   916  	}
   917  
   918  	// Ensure that there are no blocked allocations
   919  	testutil.WaitForResult(func() (bool, error) {
   920  		for id, ar := range c1.getAllocRunners() {
   921  			if ar.IsWaiting() {
   922  				return false, fmt.Errorf("%q still blocked", id)
   923  			}
   924  			if ar.IsMigrating() {
   925  				return false, fmt.Errorf("%q still migrating", id)
   926  			}
   927  		}
   928  		return true, nil
   929  	}, func(err error) {
   930  		t.Fatalf("err: %v", err)
   931  	})
   932  
   933  	// Destroy all the allocations
   934  	for _, ar := range c1.getAllocRunners() {
   935  		ar.Destroy()
   936  	}
   937  
   938  	for _, ar := range c1.getAllocRunners() {
   939  		<-ar.DestroyCh()
   940  	}
   941  }
   942  
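        // TestClient_ValidateMigrateToken_ValidToken asserts that a migrate token
        // generated with the node's secret ID validates.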
   943  func TestClient_ValidateMigrateToken_ValidToken(t *testing.T) {
   944  	t.Parallel()
   945  	assert := assert.New(t)
   946  
   947  	c, cleanup := TestClient(t, func(c *config.Config) {
   948  		c.ACLEnabled = true
   949  	})
   950  	defer cleanup()
   951  
   952  	alloc := mock.Alloc()
   953  	validToken, err := structs.GenerateMigrateToken(alloc.ID, c.secretNodeID())
   954  	assert.Nil(err)
   955  
   956  	assert.Equal(c.ValidateMigrateToken(alloc.ID, validToken), true)
   957  }
   958  
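        // TestClient_ValidateMigrateToken_InvalidToken asserts that empty or malformed
        // migrate tokens are rejected when ACLs are enabled.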
   959  func TestClient_ValidateMigrateToken_InvalidToken(t *testing.T) {
   960  	t.Parallel()
   961  	assert := assert.New(t)
   962  
   963  	c, cleanup := TestClient(t, func(c *config.Config) {
   964  		c.ACLEnabled = true
   965  	})
   966  	defer cleanup()
   967  
   968  	assert.Equal(c.ValidateMigrateToken("", ""), false)
   969  
   970  	alloc := mock.Alloc()
   971  	assert.Equal(c.ValidateMigrateToken(alloc.ID, alloc.ID), false)
   972  	assert.Equal(c.ValidateMigrateToken(alloc.ID, ""), false)
   973  }
   974  
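        // TestClient_ValidateMigrateToken_ACLDisabled asserts that migrate token
        // validation always succeeds when ACLs are disabled.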
   975  func TestClient_ValidateMigrateToken_ACLDisabled(t *testing.T) {
   976  	t.Parallel()
   977  	assert := assert.New(t)
   978  
   979  	c, cleanup := TestClient(t, func(c *config.Config) {})
   980  	defer cleanup()
   981  
   982  	assert.Equal(c.ValidateMigrateToken("", ""), true)
   983  }
   984  
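        // TestClient_ReloadTLS_UpgradePlaintextToTLS asserts that reloading the TLS
        // configuration upgrades client RPC from plaintext to TLS.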
   985  func TestClient_ReloadTLS_UpgradePlaintextToTLS(t *testing.T) {
   986  	t.Parallel()
   987  	assert := assert.New(t)
   988  
   989  	s1, addr := testServer(t, func(c *nomad.Config) {
   990  		c.Region = "regionFoo"
   991  	})
   992  	defer s1.Shutdown()
   993  	testutil.WaitForLeader(t, s1.RPC)
   994  
   995  	const (
   996  		cafile  = "../helper/tlsutil/testdata/ca.pem"
   997  		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
   998  		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
   999  	)
  1000  
  1001  	c1, cleanup := TestClient(t, func(c *config.Config) {
  1002  		c.Servers = []string{addr}
  1003  	})
  1004  	defer cleanup()
  1005  
  1006  	// Registering a node over plaintext should succeed
  1007  	{
  1008  		req := structs.NodeSpecificRequest{
  1009  			NodeID:       c1.Node().ID,
  1010  			QueryOptions: structs.QueryOptions{Region: "regionFoo"},
  1011  		}
  1012  
  1013  		testutil.WaitForResult(func() (bool, error) {
  1014  			var out structs.SingleNodeResponse
  1015  			err := c1.RPC("Node.GetNode", &req, &out)
  1016  			if err != nil {
  1017  				return false, fmt.Errorf("client RPC failed when it should have succeeded:\n%+v", err)
  1018  			}
  1019  			return true, nil
  1020  		},
  1021  			func(err error) {
  1022  				t.Fatal(err)
  1023  			},
  1024  		)
  1025  	}
  1026  
  1027  	newConfig := &nconfig.TLSConfig{
  1028  		EnableHTTP:           true,
  1029  		EnableRPC:            true,
  1030  		VerifyServerHostname: true,
  1031  		CAFile:               cafile,
  1032  		CertFile:             foocert,
  1033  		KeyFile:              fookey,
  1034  	}
  1035  
  1036  	err := c1.reloadTLSConnections(newConfig)
  1037  	assert.Nil(err)
  1038  
  1039  	// Registering a node over plaintext should fail after the node has upgraded
  1040  	// to TLS
  1041  	{
  1042  		req := structs.NodeSpecificRequest{
  1043  			NodeID:       c1.Node().ID,
  1044  			QueryOptions: structs.QueryOptions{Region: "regionFoo"},
  1045  		}
  1046  		testutil.WaitForResult(func() (bool, error) {
  1047  			var out structs.SingleNodeResponse
  1048  			err := c1.RPC("Node.GetNode", &req, &out)
  1049  			if err == nil {
  1050  				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
  1051  			}
  1052  			return true, nil
  1053  		},
  1054  			func(err error) {
  1055  				t.Fatal(err)
  1056  			},
  1057  		)
  1058  	}
  1059  }
  1060  
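        // TestClient_ReloadTLS_DowngradeTLSToPlaintext asserts that reloading an empty
        // TLS configuration downgrades client RPC from TLS to plaintext.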
  1061  func TestClient_ReloadTLS_DowngradeTLSToPlaintext(t *testing.T) {
  1062  	t.Parallel()
  1063  	assert := assert.New(t)
  1064  
  1065  	s1, addr := testServer(t, func(c *nomad.Config) {
  1066  		c.Region = "regionFoo"
  1067  	})
  1068  	defer s1.Shutdown()
  1069  	testutil.WaitForLeader(t, s1.RPC)
  1070  
  1071  	const (
  1072  		cafile  = "../helper/tlsutil/testdata/ca.pem"
  1073  		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
  1074  		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
  1075  	)
  1076  
  1077  	c1, cleanup := TestClient(t, func(c *config.Config) {
  1078  		c.Servers = []string{addr}
  1079  		c.TLSConfig = &nconfig.TLSConfig{
  1080  			EnableHTTP:           true,
  1081  			EnableRPC:            true,
  1082  			VerifyServerHostname: true,
  1083  			CAFile:               cafile,
  1084  			CertFile:             foocert,
  1085  			KeyFile:              fookey,
  1086  		}
  1087  	})
  1088  	defer cleanup()
  1089  
  1090  	// assert that when one node is running in encrypted mode, an RPC request to a
  1091  	// node running in plaintext mode should fail
  1092  	{
  1093  		req := structs.NodeSpecificRequest{
  1094  			NodeID:       c1.Node().ID,
  1095  			QueryOptions: structs.QueryOptions{Region: "regionFoo"},
  1096  		}
  1097  		testutil.WaitForResult(func() (bool, error) {
  1098  			var out structs.SingleNodeResponse
  1099  			err := c1.RPC("Node.GetNode", &req, &out)
  1100  			if err == nil {
  1101  				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
  1102  			}
  1103  			return true, nil
  1104  		}, func(err error) {
  1105  			t.Fatal(err)
  1106  		},
  1107  		)
  1108  	}
  1109  
  1110  	newConfig := &nconfig.TLSConfig{}
  1111  
  1112  	err := c1.reloadTLSConnections(newConfig)
  1113  	assert.Nil(err)
  1114  
  1115  	// assert that when both nodes are in plaintext mode, an RPC request should
  1116  	// succeed
  1117  	{
  1118  		req := structs.NodeSpecificRequest{
  1119  			NodeID:       c1.Node().ID,
  1120  			QueryOptions: structs.QueryOptions{Region: "regionFoo"},
  1121  		}
  1122  		testutil.WaitForResult(func() (bool, error) {
  1123  			var out structs.SingleNodeResponse
  1124  			err := c1.RPC("Node.GetNode", &req, &out)
  1125  			if err != nil {
  1126  				return false, fmt.Errorf("client RPC failed when it should have succeeded:\n%+v", err)
  1127  			}
  1128  			return true, nil
  1129  		}, func(err error) {
  1130  			t.Fatal(err)
  1131  		},
  1132  		)
  1133  	}
  1134  }
  1135  
  1136  // TestClient_ServerList tests client methods that interact with the internal
  1137  // nomad server list.
  1138  func TestClient_ServerList(t *testing.T) {
  1139  	t.Parallel()
  1140  	client, cleanup := TestClient(t, func(c *config.Config) {})
  1141  	defer cleanup()
  1142  
  1143  	if s := client.GetServers(); len(s) != 0 {
  1144  		t.Fatalf("expected server list to be empty but found: %+q", s)
  1145  	}
  1146  	if _, err := client.SetServers(nil); err != noServersErr {
  1147  		t.Fatalf("expected setting an empty list to return a 'no servers' error but received %v", err)
  1148  	}
  1149  	if _, err := client.SetServers([]string{"123.456.13123.123.13:80"}); err == nil {
  1150  		t.Fatalf("expected setting a bad server to return an error")
  1151  	}
  1152  	if _, err := client.SetServers([]string{"123.456.13123.123.13:80", "127.0.0.1:1234", "127.0.0.1"}); err == nil {
  1153  		t.Fatalf("expected setting servers with an unresolvable entry to return an error")
  1154  	}
  1155  	s := client.GetServers()
  1156  	if len(s) != 0 {
  1157  		t.Fatalf("expected no servers to be kept but received: %+q", s)
  1158  	}
  1159  }
  1160  
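        // TestClient_UpdateNodeFromDevicesAccumulates asserts that fingerprint and
        // device updates are merged into the node's resources instead of overwriting
        // each other.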
  1161  func TestClient_UpdateNodeFromDevicesAccumulates(t *testing.T) {
  1162  	t.Parallel()
  1163  	client, cleanup := TestClient(t, func(c *config.Config) {})
  1164  	defer cleanup()
  1165  
  1166  	client.updateNodeFromFingerprint(&fingerprint.FingerprintResponse{
  1167  		NodeResources: &structs.NodeResources{
  1168  			Cpu: structs.NodeCpuResources{CpuShares: 123},
  1169  		},
  1170  	})
  1171  
  1172  	client.updateNodeFromFingerprint(&fingerprint.FingerprintResponse{
  1173  		NodeResources: &structs.NodeResources{
  1174  			Memory: structs.NodeMemoryResources{MemoryMB: 1024},
  1175  		},
  1176  	})
  1177  
  1178  	client.updateNodeFromDevices([]*structs.NodeDeviceResource{
  1179  		{
  1180  			Vendor: "vendor",
  1181  			Type:   "type",
  1182  		},
  1183  	})
  1184  
  1185  	// initial check
  1186  	expectedResources := &structs.NodeResources{
  1187  		// computed through test client initialization
  1188  		Networks: client.configCopy.Node.NodeResources.Networks,
  1189  		Disk:     client.configCopy.Node.NodeResources.Disk,
  1190  
  1191  		// injected
  1192  		Cpu:    structs.NodeCpuResources{CpuShares: 123},
  1193  		Memory: structs.NodeMemoryResources{MemoryMB: 1024},
  1194  		Devices: []*structs.NodeDeviceResource{
  1195  			{
  1196  				Vendor: "vendor",
  1197  				Type:   "type",
  1198  			},
  1199  		},
  1200  	}
  1201  
  1202  	assert.EqualValues(t, expectedResources, client.configCopy.Node.NodeResources)
  1203  
  1204  	// overrides of values
  1205  
  1206  	client.updateNodeFromFingerprint(&fingerprint.FingerprintResponse{
  1207  		NodeResources: &structs.NodeResources{
  1208  			Memory: structs.NodeMemoryResources{MemoryMB: 2048},
  1209  		},
  1210  	})
  1211  
  1212  	client.updateNodeFromDevices([]*structs.NodeDeviceResource{
  1213  		{
  1214  			Vendor: "vendor",
  1215  			Type:   "type",
  1216  		},
  1217  		{
  1218  			Vendor: "vendor2",
  1219  			Type:   "type2",
  1220  		},
  1221  	})
  1222  
  1223  	expectedResources2 := &structs.NodeResources{
  1224  		// computed through test client initialization
  1225  		Networks: client.configCopy.Node.NodeResources.Networks,
  1226  		Disk:     client.configCopy.Node.NodeResources.Disk,
  1227  
  1228  		// injected
  1229  		Cpu:    structs.NodeCpuResources{CpuShares: 123},
  1230  		Memory: structs.NodeMemoryResources{MemoryMB: 2048},
  1231  		Devices: []*structs.NodeDeviceResource{
  1232  			{
  1233  				Vendor: "vendor",
  1234  				Type:   "type",
  1235  			},
  1236  			{
  1237  				Vendor: "vendor2",
  1238  				Type:   "type2",
  1239  			},
  1240  		},
  1241  	}
  1242  
  1243  	assert.EqualValues(t, expectedResources2, client.configCopy.Node.NodeResources)
  1244  
  1245  }
  1246  
  1247  // TestClient_UpdateNodeFromFingerprintKeepsConfig asserts manually configured
  1248  // network interfaces take precedence over fingerprinted ones.
  1249  func TestClient_UpdateNodeFromFingerprintKeepsConfig(t *testing.T) {
  1250  	t.Parallel()
  1251  
  1252  	// Client without network configured updates to match fingerprint
  1253  	client, cleanup := TestClient(t, nil)
  1254  	defer cleanup()
  1255  	// capture the platform fingerprinted device name for the next test
  1256  	dev := client.config.Node.NodeResources.Networks[0].Device
  1257  	client.updateNodeFromFingerprint(&fingerprint.FingerprintResponse{
  1258  		NodeResources: &structs.NodeResources{
  1259  			Cpu:      structs.NodeCpuResources{CpuShares: 123},
  1260  			Networks: []*structs.NetworkResource{{Device: "any-interface"}},
  1261  		},
  1262  		Resources: &structs.Resources{
  1263  			CPU:      80,
  1264  			Networks: []*structs.NetworkResource{{Device: "any-interface"}},
  1265  		},
  1266  	})
  1267  	assert.Equal(t, int64(123), client.config.Node.NodeResources.Cpu.CpuShares)
  1268  	assert.Equal(t, "any-interface", client.config.Node.NodeResources.Networks[0].Device)
  1269  	assert.Equal(t, 80, client.config.Node.Resources.CPU)
  1270  	assert.Equal(t, "any-interface", client.config.Node.Resources.Networks[0].Device)
  1271  
  1272  	// Client with network interface configured keeps the config
  1273  	// setting on update
  1274  	name := "TestClient_UpdateNodeFromFingerprintKeepsConfig2"
  1275  	client, cleanup = TestClient(t, func(c *config.Config) {
  1276  		c.NetworkInterface = dev
  1277  		c.Node.Name = name
  1278  		// Node is already a mock.Node, with a device
  1279  		c.Node.NodeResources.Networks[0].Device = dev
  1280  		c.Node.Resources.Networks = c.Node.NodeResources.Networks
  1281  	})
  1282  	defer cleanup()
  1283  	client.updateNodeFromFingerprint(&fingerprint.FingerprintResponse{
  1284  		NodeResources: &structs.NodeResources{
  1285  			Cpu: structs.NodeCpuResources{CpuShares: 123},
  1286  			Networks: []*structs.NetworkResource{
  1287  				{Device: "any-interface", MBits: 20},
  1288  				{Device: dev, MBits: 20},
  1289  			},
  1290  		},
  1291  		Resources: &structs.Resources{
  1292  			CPU:      80,
  1293  			Networks: []*structs.NetworkResource{{Device: "any-interface"}},
  1294  		},
  1295  	})
  1296  	assert.Equal(t, int64(123), client.config.Node.NodeResources.Cpu.CpuShares)
  1297  	// only the configured device is kept
  1298  	assert.Equal(t, 1, len(client.config.Node.NodeResources.Networks))
  1299  	assert.Equal(t, dev, client.config.Node.NodeResources.Networks[0].Device)
  1300  	// network speed updates to the configured network are kept
  1301  	assert.Equal(t, 20, client.config.Node.NodeResources.Networks[0].MBits)
  1302  	assert.Equal(t, 80, client.config.Node.Resources.CPU)
  1303  	assert.Equal(t, dev, client.config.Node.Resources.Networks[0].Device)
  1304  
  1305  	// Network speed is applied to all NetworkResources
  1306  	client.config.NetworkInterface = ""
  1307  	client.config.NetworkSpeed = 100
  1308  	client.updateNodeFromFingerprint(&fingerprint.FingerprintResponse{
  1309  		NodeResources: &structs.NodeResources{
  1310  			Cpu:      structs.NodeCpuResources{CpuShares: 123},
  1311  			Networks: []*structs.NetworkResource{{Device: "any-interface", MBits: 20}},
  1312  		},
  1313  		Resources: &structs.Resources{
  1314  			CPU:      80,
  1315  			Networks: []*structs.NetworkResource{{Device: "any-interface"}},
  1316  		},
  1317  	})
  1318  	assert.Equal(t, "any-interface", client.config.Node.NodeResources.Networks[0].Device)
  1319  	assert.Equal(t, 100, client.config.Node.NodeResources.Networks[0].MBits)
  1320  }
  1321  
  1322  // Support multiple IP addresses (e.g. IPv4 and IPv6) on the configured network interface
  1323  func Test_UpdateNodeFromFingerprintMultiIP(t *testing.T) {
  1324  	t.Parallel()
  1325  
  1326  	var dev string
  1327  	switch runtime.GOOS {
  1328  	case "linux":
  1329  		dev = "lo"
  1330  	case "darwin":
  1331  		dev = "lo0"
  1332  	}
  1333  
  1334  	// Client configured with a network interface on the loopback device
  1335  	client, cleanup := TestClient(t, func(c *config.Config) {
  1336  		c.NetworkInterface = dev
  1337  		c.Node.NodeResources.Networks[0].Device = dev
  1338  		c.Node.Resources.Networks = c.Node.NodeResources.Networks
  1339  	})
  1340  	defer cleanup()
  1341  
  1342  	client.updateNodeFromFingerprint(&fingerprint.FingerprintResponse{
  1343  		NodeResources: &structs.NodeResources{
  1344  			Cpu: structs.NodeCpuResources{CpuShares: 123},
  1345  			Networks: []*structs.NetworkResource{
  1346  				{Device: dev, IP: "127.0.0.1"},
  1347  				{Device: dev, IP: "::1"},
  1348  			},
  1349  		},
  1350  	})
  1351  
  1352  	two := structs.Networks{
  1353  		{Device: dev, IP: "127.0.0.1"},
  1354  		{Device: dev, IP: "::1"},
  1355  	}
  1356  
  1357  	require.Equal(t, two, client.config.Node.NodeResources.Networks)
  1358  }
  1359  
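        // TestClient_computeAllocatedDeviceStats asserts that host device stats are
        // filtered down to the device instances that are actually allocated.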
  1360  func TestClient_computeAllocatedDeviceStats(t *testing.T) {
  1361  	logger := testlog.HCLogger(t)
  1362  	c := &Client{logger: logger}
  1363  
  1364  	newDeviceStats := func(strValue string) *device.DeviceStats {
  1365  		return &device.DeviceStats{
  1366  			Summary: &psstructs.StatValue{
  1367  				StringVal: &strValue,
  1368  			},
  1369  		}
  1370  	}
  1371  
  1372  	allocatedDevices := []*structs.AllocatedDeviceResource{
  1373  		{
  1374  			Vendor:    "vendor",
  1375  			Type:      "type",
  1376  			Name:      "name",
  1377  			DeviceIDs: []string{"d2", "d3", "notfoundid"},
  1378  		},
  1379  		{
  1380  			Vendor:    "vendor2",
  1381  			Type:      "type2",
  1382  			Name:      "name2",
  1383  			DeviceIDs: []string{"a2"},
  1384  		},
  1385  		{
  1386  			Vendor:    "vendor_notfound",
  1387  			Type:      "type_notfound",
  1388  			Name:      "name_notfound",
  1389  			DeviceIDs: []string{"d3"},
  1390  		},
  1391  	}
  1392  
  1393  	hostDeviceGroupStats := []*device.DeviceGroupStats{
  1394  		{
  1395  			Vendor: "vendor",
  1396  			Type:   "type",
  1397  			Name:   "name",
  1398  			InstanceStats: map[string]*device.DeviceStats{
  1399  				"unallocated": newDeviceStats("unallocated"),
  1400  				"d2":          newDeviceStats("d2"),
  1401  				"d3":          newDeviceStats("d3"),
  1402  			},
  1403  		},
  1404  		{
  1405  			Vendor: "vendor2",
  1406  			Type:   "type2",
  1407  			Name:   "name2",
  1408  			InstanceStats: map[string]*device.DeviceStats{
  1409  				"a2": newDeviceStats("a2"),
  1410  			},
  1411  		},
  1412  		{
  1413  			Vendor: "vendor_unused",
  1414  			Type:   "type_unused",
  1415  			Name:   "name_unused",
  1416  			InstanceStats: map[string]*device.DeviceStats{
  1417  				"unallocated_unused": newDeviceStats("unallocated_unused"),
  1418  			},
  1419  		},
  1420  	}
  1421  
  1422  	// test some edge conditions
  1423  	assert.Empty(t, c.computeAllocatedDeviceGroupStats(nil, nil))
  1424  	assert.Empty(t, c.computeAllocatedDeviceGroupStats(nil, hostDeviceGroupStats))
  1425  	assert.Empty(t, c.computeAllocatedDeviceGroupStats(allocatedDevices, nil))
  1426  
  1427  	// actual test
  1428  	result := c.computeAllocatedDeviceGroupStats(allocatedDevices, hostDeviceGroupStats)
  1429  	sort.Slice(result, func(i, j int) bool {
  1430  		return result[i].Vendor < result[j].Vendor
  1431  	})
  1432  
  1433  	expected := []*device.DeviceGroupStats{
  1434  		{
  1435  			Vendor: "vendor",
  1436  			Type:   "type",
  1437  			Name:   "name",
  1438  			InstanceStats: map[string]*device.DeviceStats{
  1439  				"d2": newDeviceStats("d2"),
  1440  				"d3": newDeviceStats("d3"),
  1441  			},
  1442  		},
  1443  		{
  1444  			Vendor: "vendor2",
  1445  			Type:   "type2",
  1446  			Name:   "name2",
  1447  			InstanceStats: map[string]*device.DeviceStats{
  1448  				"a2": newDeviceStats("a2"),
  1449  			},
  1450  		},
  1451  	}
  1452  
  1453  	assert.EqualValues(t, expected, result)
  1454  }
  1455  
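        // TestClient_getAllocatedResources asserts that only non-terminal (pending and
        // running) allocations count toward the node's allocated resources.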
  1456  func TestClient_getAllocatedResources(t *testing.T) {
  1457  	t.Parallel()
  1458  	require := require.New(t)
  1459  	client, cleanup := TestClient(t, nil)
  1460  	defer cleanup()
  1461  
  1462  	allocStops := mock.BatchAlloc()
  1463  	allocStops.Job.TaskGroups[0].Count = 1
  1464  	allocStops.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
  1465  	allocStops.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
  1466  		"run_for":   "1ms",
  1467  		"exit_code": "0",
  1468  	}
  1469  	allocStops.Job.TaskGroups[0].RestartPolicy.Attempts = 0
  1470  	allocStops.AllocatedResources.Shared.DiskMB = 64
  1471  	allocStops.AllocatedResources.Tasks["web"].Cpu = structs.AllocatedCpuResources{CpuShares: 64}
  1472  	allocStops.AllocatedResources.Tasks["web"].Memory = structs.AllocatedMemoryResources{MemoryMB: 64}
  1473  	require.Nil(client.addAlloc(allocStops, ""))
  1474  
  1475  	allocFails := mock.BatchAlloc()
  1476  	allocFails.Job.TaskGroups[0].Count = 1
  1477  	allocFails.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
  1478  	allocFails.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
  1479  		"run_for":   "1ms",
  1480  		"exit_code": "1",
  1481  	}
  1482  	allocFails.Job.TaskGroups[0].RestartPolicy.Attempts = 0
  1483  	allocFails.AllocatedResources.Shared.DiskMB = 128
  1484  	allocFails.AllocatedResources.Tasks["web"].Cpu = structs.AllocatedCpuResources{CpuShares: 128}
  1485  	allocFails.AllocatedResources.Tasks["web"].Memory = structs.AllocatedMemoryResources{MemoryMB: 128}
  1486  	require.Nil(client.addAlloc(allocFails, ""))
  1487  
  1488  	allocRuns := mock.Alloc()
  1489  	allocRuns.Job.TaskGroups[0].Count = 1
  1490  	allocRuns.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
  1491  	allocRuns.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
  1492  		"run_for": "3s",
  1493  	}
  1494  	allocRuns.AllocatedResources.Shared.DiskMB = 256
  1495  	allocRuns.AllocatedResources.Tasks["web"].Cpu = structs.AllocatedCpuResources{CpuShares: 256}
  1496  	allocRuns.AllocatedResources.Tasks["web"].Memory = structs.AllocatedMemoryResources{MemoryMB: 256}
  1497  	require.Nil(client.addAlloc(allocRuns, ""))
  1498  
  1499  	allocPends := mock.Alloc()
  1500  	allocPends.Job.TaskGroups[0].Count = 1
  1501  	allocPends.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
  1502  	allocPends.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
  1503  		"run_for":         "5s",
  1504  		"start_block_for": "10s",
  1505  	}
  1506  	allocPends.AllocatedResources.Shared.DiskMB = 512
  1507  	allocPends.AllocatedResources.Tasks["web"].Cpu = structs.AllocatedCpuResources{CpuShares: 512}
  1508  	allocPends.AllocatedResources.Tasks["web"].Memory = structs.AllocatedMemoryResources{MemoryMB: 512}
  1509  	require.Nil(client.addAlloc(allocPends, ""))
  1510  
  1511  	// wait for allocStops to complete, allocFails to fail, allocRuns to be running, and allocPends to be pending
  1512  	testutil.WaitForResult(func() (bool, error) {
  1513  		as, err := client.GetAllocState(allocPends.ID)
  1514  		if err != nil {
  1515  			return false, err
  1516  		} else if as.ClientStatus != structs.AllocClientStatusPending {
  1517  			return false, fmt.Errorf("allocPends not yet pending: %#v", as)
  1518  		}
  1519  
  1520  		as, err = client.GetAllocState(allocRuns.ID)
  1521  		if err != nil {
  1522  			return false, err
  1523  		} else if as.ClientStatus != structs.AllocClientStatusRunning {
  1524  			return false, fmt.Errorf("allocRuns not yet running: %#v", as)
  1525  		}
  1526  
  1527  		as, err = client.GetAllocState(allocStops.ID)
  1528  		if err != nil {
  1529  			return false, err
  1530  		} else if as.ClientStatus != structs.AllocClientStatusComplete {
  1531  			return false, fmt.Errorf("allocStops not yet complete: %#v", as)
  1532  		}
  1533  
  1534  		as, err = client.GetAllocState(allocFails.ID)
  1535  		if err != nil {
  1536  			return false, err
  1537  		} else if as.ClientStatus != structs.AllocClientStatusFailed {
  1538  			return false, fmt.Errorf("allocFails not yet failed: %#v", as)
  1539  		}
  1540  
  1541  		return true, nil
  1542  	}, func(err error) {
  1543  		require.NoError(err)
  1544  	})
  1545  
  1546  	result := client.getAllocatedResources(client.config.Node)
  1547  
  1548  	expected := structs.ComparableResources{
  1549  		Flattened: structs.AllocatedTaskResources{
  1550  			Cpu: structs.AllocatedCpuResources{
  1551  				CpuShares: 768,
  1552  			},
  1553  			Memory: structs.AllocatedMemoryResources{
  1554  				MemoryMB: 768,
  1555  			},
  1556  			Networks: nil,
  1557  		},
  1558  		Shared: structs.AllocatedSharedResources{
  1559  			DiskMB: 768,
  1560  		},
  1561  	}
  1562  
  1563  	assert.EqualValues(t, expected, *result)
  1564  }
  1565  
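        // TestClient_updateNodeFromDriverUpdatesAll asserts that driver info updates
        // refresh both the node's driver records and the driver-provided attributes.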
  1566  func TestClient_updateNodeFromDriverUpdatesAll(t *testing.T) {
  1567  	t.Parallel()
  1568  	client, cleanup := TestClient(t, nil)
  1569  	defer cleanup()
  1570  
  1571  	// initial update
  1572  	{
  1573  		info := &structs.DriverInfo{
  1574  			Detected:          true,
  1575  			Healthy:           false,
  1576  			HealthDescription: "not healthy at start",
  1577  			Attributes: map[string]string{
  1578  				"node.mock.testattr1": "val1",
  1579  			},
  1580  		}
  1581  		client.updateNodeFromDriver("mock", info)
  1582  		n := client.config.Node
  1583  
  1584  		updatedInfo := *n.Drivers["mock"]
  1585  		// compare without update time
  1586  		updatedInfo.UpdateTime = info.UpdateTime
  1587  		assert.EqualValues(t, updatedInfo, *info)
  1588  
  1589  		// check node attributes
  1590  		assert.Equal(t, "val1", n.Attributes["node.mock.testattr1"])
  1591  	}
  1592  
  1593  	// second update: the driver becomes healthy and its attribute value changes
  1594  	{
  1595  		info := &structs.DriverInfo{
  1596  			Detected:          true,
  1597  			Healthy:           true,
  1598  			HealthDescription: "healthy",
  1599  			Attributes: map[string]string{
  1600  				"node.mock.testattr1": "val2",
  1601  			},
  1602  		}
  1603  		client.updateNodeFromDriver("mock", info)
  1604  		n := client.Node()
  1605  
  1606  		updatedInfo := *n.Drivers["mock"]
  1607  		// compare without update time
  1608  		updatedInfo.UpdateTime = info.UpdateTime
  1609  		assert.EqualValues(t, updatedInfo, *info)
  1610  
  1611  		// check node attributes are updated
  1612  		assert.Equal(t, "val2", n.Attributes["node.mock.testattr1"])
  1613  
  1614  		// update once more with the same info, updateTime shouldn't change
  1615  		client.updateNodeFromDriver("mock", info)
  1616  		un := client.Node()
  1617  		assert.EqualValues(t, n, un)
  1618  	}
  1619  
  1620  	// update once more to unhealthy because why not
  1621  	{
  1622  		info := &structs.DriverInfo{
  1623  			Detected:          true,
  1624  			Healthy:           false,
  1625  			HealthDescription: "lost track",
  1626  			Attributes: map[string]string{
  1627  				"node.mock.testattr1": "",
  1628  			},
  1629  		}
  1630  		client.updateNodeFromDriver("mock", info)
  1631  		n := client.Node()
  1632  
  1633  		updatedInfo := *n.Drivers["mock"]
  1634  		// compare without update time
  1635  		updatedInfo.UpdateTime = info.UpdateTime
  1636  		assert.EqualValues(t, updatedInfo, *info)
  1637  
  1638  		// check node attributes are updated
  1639  		assert.Equal(t, "", n.Attributes["node.mock.testattr1"])
  1640  
  1641  		// update once more with the same info, updateTime shouldn't change
  1642  		client.updateNodeFromDriver("mock", info)
  1643  		un := client.Node()
  1644  		assert.EqualValues(t, n, un)
  1645  	}
  1646  }