github.com/diptanu/nomad@v0.5.7-0.20170516172507-d72e86cbe3d9/client/client_test.go

package client

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net"
	"os"
	"path/filepath"
	"runtime"
	"sync/atomic"
	"testing"
	"time"

	memdb "github.com/hashicorp/go-memdb"
	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/command/agent/consul"
	"github.com/hashicorp/nomad/nomad"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	nconfig "github.com/hashicorp/nomad/nomad/structs/config"
	"github.com/hashicorp/nomad/testutil"
	"github.com/mitchellh/hashstructure"

	ctestutil "github.com/hashicorp/nomad/client/testutil"
)

var (
	nextPort uint32 = 16000

	// osExecDriverSupport lists the platforms on which the exec driver is
	// expected to fingerprint successfully.
	osExecDriverSupport = map[string]bool{
		"linux": true,
	}
)

// getPort returns a port from a shared, atomically incremented counter so
// that concurrently started test servers and Serf members do not collide.
func getPort() int {
	return int(atomic.AddUint32(&nextPort, 1))
}

// testServer starts an in-memory Nomad server in dev mode with tightened
// Serf and Raft timing and returns it along with its RPC address. The
// optional callback may mutate the config before the server starts.
func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string) {
	f := false

	// Setup the default settings
	config := nomad.DefaultConfig()
	config.VaultConfig.Enabled = &f
	config.Build = "unittest"
	config.DevMode = true
	config.RPCAddr = &net.TCPAddr{
		IP:   []byte{127, 0, 0, 1},
		Port: getPort(),
	}
	config.NodeName = fmt.Sprintf("Node %d", config.RPCAddr.Port)

	// Tighten the Serf timing
	config.SerfConfig.MemberlistConfig.BindAddr = "127.0.0.1"
	config.SerfConfig.MemberlistConfig.BindPort = getPort()
	config.SerfConfig.MemberlistConfig.SuspicionMult = 2
	config.SerfConfig.MemberlistConfig.RetransmitMult = 2
	config.SerfConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
	config.SerfConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
	config.SerfConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond

	// Tighten the Raft timing and start as leader so single-node test
	// clusters are usable immediately
	config.RaftConfig.LeaderLeaseTimeout = 20 * time.Millisecond
	config.RaftConfig.HeartbeatTimeout = 40 * time.Millisecond
	config.RaftConfig.ElectionTimeout = 40 * time.Millisecond
	config.RaftConfig.StartAsLeader = true
	config.RaftTimeout = 500 * time.Millisecond

	// Invoke the callback if any
	if cb != nil {
		cb(config)
	}

	logger := log.New(config.LogOutput, "", log.LstdFlags)
	catalog := consul.NewMockCatalog(logger)

	// Create server
	server, err := nomad.NewServer(config, catalog, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return server, config.RPCAddr.String()
}

// testClient builds a dev-mode client with Vault disabled and mock Consul
// components. The optional callback may mutate the config before the client
// starts.
func testClient(t *testing.T, cb func(c *config.Config)) *Client {
	f := false

	conf := config.DefaultConfig()
	conf.VaultConfig.Enabled = &f
	conf.DevMode = true
	conf.Node = &structs.Node{
		Reserved: &structs.Resources{
			DiskMB: 0,
		},
	}
	if cb != nil {
		cb(conf)
	}

	logger := log.New(conf.LogOutput, "", log.LstdFlags)
	catalog := consul.NewMockCatalog(logger)
	mockService := newMockConsulServiceClient()
	mockService.logger = logger
	client, err := NewClient(conf, catalog, mockService, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return client
}
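
// A minimal sketch of how these helpers combine: start a server, point a
// client at its RPC address, and wait for connectivity (this mirrors
// TestClient_RPC below):
//
//	s, addr := testServer(t, nil)
//	defer s.Shutdown()
//	c := testClient(t, func(c *config.Config) {
//		c.Servers = []string{addr}
//	})
//	defer c.Shutdown()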

func TestClient_StartStop(t *testing.T) {
	client := testClient(t, nil)
	if err := client.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}
}

func TestClient_RPC(t *testing.T) {
	s1, addr := testServer(t, nil)
	defer s1.Shutdown()

	c1 := testClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
	})
	defer c1.Shutdown()

	// RPC should succeed
	testutil.WaitForResult(func() (bool, error) {
		var out struct{}
		err := c1.RPC("Status.Ping", struct{}{}, &out)
		return err == nil, err
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

func TestClient_RPC_Passthrough(t *testing.T) {
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// RPC should succeed
	testutil.WaitForResult(func() (bool, error) {
		var out struct{}
		err := c1.RPC("Status.Ping", struct{}{}, &out)
		return err == nil, err
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

func TestClient_Fingerprint(t *testing.T) {
	c := testClient(t, nil)
	defer c.Shutdown()

	// Ensure kernel and arch are always present
	node := c.Node()
	if node.Attributes["kernel.name"] == "" {
		t.Fatalf("missing kernel.name")
	}
	if node.Attributes["cpu.arch"] == "" {
		t.Fatalf("missing cpu arch")
	}
}

func TestClient_HasNodeChanged(t *testing.T) {
	c := testClient(t, nil)
	defer c.Shutdown()

	node := c.Node()
	// Calculate node attributes hash
	attrHash, err := hashstructure.Hash(node.Attributes, nil)
	if err != nil {
		t.Fatalf("unable to calculate node attributes hash: %v", err)
	}
	// Calculate node meta map hash
	metaHash, err := hashstructure.Hash(node.Meta, nil)
	if err != nil {
		t.Fatalf("unable to calculate node meta hash: %v", err)
	}
	if changed, _, _ := c.hasNodeChanged(attrHash, metaHash); changed {
		t.Fatalf("Unexpected hash change.")
	}

	// Change a node attribute
	node.Attributes["arch"] = "xyz_86"
	if changed, newAttrHash, _ := c.hasNodeChanged(attrHash, metaHash); !changed {
		t.Fatalf("Expected hash change in attributes: %d vs %d", attrHash, newAttrHash)
	}

	// Change the node meta map
	node.Meta["foo"] = "bar"
	if changed, _, newMetaHash := c.hasNodeChanged(attrHash, metaHash); !changed {
		t.Fatalf("Expected hash change in meta map: %d vs %d", metaHash, newMetaHash)
	}
}
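
// The change-detection pattern above boils down to hashing a structure and
// comparing against the previously stored hash. A minimal sketch, using only
// the hashstructure package:
//
//	before, _ := hashstructure.Hash(node.Attributes, nil)
//	node.Attributes["arch"] = "xyz_86"
//	after, _ := hashstructure.Hash(node.Attributes, nil)
//	changed := before != after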

func TestClient_Fingerprint_InWhitelist(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		// Weird spacing to test trimming. Whitelist several modules, including cpu.
		c.Options["fingerprint.whitelist"] = "  arch, consul,cpu,env_aws,env_gce,host,memory,network,storage,foo,bar	"
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["cpu.frequency"] == "" {
		t.Fatalf("missing cpu fingerprint module")
	}
}

func TestClient_Fingerprint_InBlacklist(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		// Weird spacing to test trimming. Blacklist cpu.
		c.Options["fingerprint.blacklist"] = "  cpu	"
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["cpu.frequency"] != "" {
		t.Fatalf("cpu fingerprint module loaded despite blacklisting")
	}
}

func TestClient_Fingerprint_OutOfWhitelist(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		c.Options["fingerprint.whitelist"] = "arch,consul,env_aws,env_gce,host,memory,network,storage,foo,bar"
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["cpu.frequency"] != "" {
		t.Fatalf("found cpu fingerprint module")
	}
}

func TestClient_Fingerprint_WhitelistBlacklistCombination(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		// With both a whitelist and a blacklist, the loaded modules should be
		// the set difference (arch, cpu)
		c.Options["fingerprint.whitelist"] = "arch,memory,cpu"
		c.Options["fingerprint.blacklist"] = "memory,nomad"
	})
	defer c.Shutdown()

	node := c.Node()
	// Check expected modules are present
	if node.Attributes["cpu.frequency"] == "" {
		t.Fatalf("missing cpu fingerprint module")
	}
	if node.Attributes["cpu.arch"] == "" {
		t.Fatalf("missing arch fingerprint module")
	}
	// Check remainder _not_ present
	if node.Attributes["memory.totalbytes"] != "" {
		t.Fatalf("found memory fingerprint module")
	}
	if node.Attributes["nomad.version"] != "" {
		t.Fatalf("found nomad fingerprint module")
	}
}
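
// In other words, with whitelist {arch, memory, cpu} and blacklist
// {memory, nomad}, the modules that actually load are the set difference:
//
//	{arch, memory, cpu} - {memory, nomad} = {arch, cpu}
//
// The same whitelist-minus-blacklist rule is exercised for drivers in
// TestClient_Drivers_WhitelistBlacklistCombination below.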

func TestClient_Drivers(t *testing.T) {
	c := testClient(t, nil)
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["driver.exec"] == "" {
		if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
			t.Fatalf("missing exec driver")
		} else {
			t.Skipf("missing exec driver, no OS support")
		}
	}
}

func TestClient_Drivers_InWhitelist(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		// Weird spacing to test trimming
		c.Options["driver.whitelist"] = "   exec ,  foo	"
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["driver.exec"] == "" {
		if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
			t.Fatalf("missing exec driver")
		} else {
			t.Skipf("missing exec driver, no OS support")
		}
	}
}

func TestClient_Drivers_InBlacklist(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		// Weird spacing to test trimming
		c.Options["driver.blacklist"] = "   exec ,  foo	"
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["driver.exec"] != "" {
		if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
			t.Fatalf("exec driver loaded despite blacklist")
		} else {
			t.Skipf("exec driver present, but no OS support expected")
		}
	}
}

func TestClient_Drivers_OutOfWhitelist(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		c.Options["driver.whitelist"] = "foo,bar,baz"
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["driver.exec"] != "" {
		t.Fatalf("found exec driver")
	}
}

func TestClient_Drivers_WhitelistBlacklistCombination(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		// Expected output is the set difference (raw_exec)
		c.Options["driver.whitelist"] = "raw_exec,exec"
		c.Options["driver.blacklist"] = "exec"
	})
	defer c.Shutdown()

	node := c.Node()
	// Check expected present
	if node.Attributes["driver.raw_exec"] == "" {
		t.Fatalf("missing raw_exec driver")
	}
	// Check expected absent
	if node.Attributes["driver.exec"] != "" {
		t.Fatalf("exec driver loaded despite blacklist")
	}
}

// TestClient_MixedTLS asserts that when a server is running with TLS enabled
// it will reject any RPC connections from clients that lack TLS. See #2525
func TestClient_MixedTLS(t *testing.T) {
	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
	)
	s1, addr := testServer(t, func(c *nomad.Config) {
		c.TLSConfig = &nconfig.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               cafile,
			CertFile:             foocert,
			KeyFile:              fookey,
		}
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse
	testutil.AssertUntil(100*time.Millisecond,
		func() (bool, error) {
			err := c1.RPC("Node.GetNode", &req, &out)
			if err == nil {
				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
			}
			return true, nil
		},
		func(err error) {
			t.Fatalf("%v", err)
		},
	)
}

// TestClient_BadTLS asserts that when a client and server are running with TLS
// enabled -- but their certificates are signed by different CAs -- they're
// unable to communicate.
func TestClient_BadTLS(t *testing.T) {
	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
		badca   = "../helper/tlsutil/testdata/ca-bad.pem"
		badcert = "../helper/tlsutil/testdata/nomad-bad.pem"
		badkey  = "../helper/tlsutil/testdata/nomad-bad-key.pem"
	)
	s1, addr := testServer(t, func(c *nomad.Config) {
		c.TLSConfig = &nconfig.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               cafile,
			CertFile:             foocert,
			KeyFile:              fookey,
		}
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
		c.TLSConfig = &nconfig.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               badca,
			CertFile:             badcert,
			KeyFile:              badkey,
		}
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse
	testutil.AssertUntil(100*time.Millisecond,
		func() (bool, error) {
			err := c1.RPC("Node.GetNode", &req, &out)
			if err == nil {
				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
			}
			return true, nil
		},
		func(err error) {
			t.Fatalf("%v", err)
		},
	)
}
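
// Both TLS tests drive the same nconfig.TLSConfig shape; the only variable is
// whether each side's certificate chains to the same CA. A minimal sketch of
// the mutual-TLS setup, assuming the testdata certificates used above:
//
//	tls := &nconfig.TLSConfig{
//		EnableHTTP:           true,
//		EnableRPC:            true,
//		VerifyServerHostname: true,
//		CAFile:               cafile,
//		CertFile:             foocert,
//		KeyFile:              fookey,
//	}
//	// Server side: c.TLSConfig = tls (in the testServer callback)
//	// Client side: c.TLSConfig = tls (in the testClient callback)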

func TestClient_Register(t *testing.T) {
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse

	// Register should succeed
	testutil.WaitForResult(func() (bool, error) {
		err := s1.RPC("Node.GetNode", &req, &out)
		if err != nil {
			return false, err
		}
		if out.Node == nil {
			return false, fmt.Errorf("missing reg")
		}
		return out.Node.ID == req.NodeID, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

func TestClient_Heartbeat(t *testing.T) {
	s1, _ := testServer(t, func(c *nomad.Config) {
		c.MinHeartbeatTTL = 50 * time.Millisecond
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse

	// Heartbeats should move the node to ready
	testutil.WaitForResult(func() (bool, error) {
		err := s1.RPC("Node.GetNode", &req, &out)
		if err != nil {
			return false, err
		}
		if out.Node == nil {
			return false, fmt.Errorf("missing reg")
		}
		return out.Node.Status == structs.NodeStatusReady, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

func TestClient_UpdateAllocStatus(t *testing.T) {
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait until the node is ready
	waitTilNodeReady(c1, t)

	job := mock.Job()
	alloc := mock.Alloc()
	alloc.NodeID = c1.Node().ID
	alloc.Job = job
	alloc.JobID = job.ID
	originalStatus := "foo"
	alloc.ClientStatus = originalStatus

	// Insert at low indexes so the client pulls them
	state := s1.State()
	if err := state.UpsertJob(0, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(100, mock.JobSummary(alloc.JobID)); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertAllocs(101, []*structs.Allocation{alloc}); err != nil {
		t.Fatal(err)
	}

	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		out, err := state.AllocByID(ws, alloc.ID)
		if err != nil {
			return false, err
		}
		if out == nil {
			return false, fmt.Errorf("no such alloc")
		}
		if out.ClientStatus == originalStatus {
			return false, fmt.Errorf("Alloc client status not updated; got %v", out.ClientStatus)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

func TestClient_WatchAllocs(t *testing.T) {
	ctestutil.ExecCompatible(t)
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait until the node is ready
	waitTilNodeReady(c1, t)

	// Create mock allocations
	job := mock.Job()
	alloc1 := mock.Alloc()
	alloc1.JobID = job.ID
	alloc1.Job = job
	alloc1.NodeID = c1.Node().ID
	alloc2 := mock.Alloc()
	alloc2.NodeID = c1.Node().ID
	alloc2.JobID = job.ID
	alloc2.Job = job

	// Insert at low indexes so the client pulls them
	state := s1.State()
	if err := state.UpsertJob(100, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
		t.Fatal(err)
	}
	err := state.UpsertAllocs(102, []*structs.Allocation{alloc1, alloc2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Both allocations should get registered
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		num := len(c1.allocs)
		c1.allocLock.RUnlock()
		return num == 2, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Delete one allocation
	err = state.DeleteEval(103, nil, []string{alloc1.ID})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Update the other allocation. Have to make a copy because the allocs are
	// shared in memory in the test and the modify index would be updated in the
	// alloc runner.
	alloc2_2 := new(structs.Allocation)
	*alloc2_2 = *alloc2
	alloc2_2.DesiredStatus = structs.AllocDesiredStatusStop
	err = state.UpsertAllocs(104, []*structs.Allocation{alloc2_2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// One allocation should be deregistered
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		num := len(c1.allocs)
		c1.allocLock.RUnlock()
		return num == 1, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// The other allocation should be updated
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		ar := c1.allocs[alloc2.ID]
		c1.allocLock.RUnlock()
		return ar.Alloc().DesiredStatus == structs.AllocDesiredStatusStop, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

// waitTilNodeReady blocks until the client's node reports ready status,
// failing the test on timeout.
func waitTilNodeReady(client *Client, t *testing.T) {
	testutil.WaitForResult(func() (bool, error) {
		n := client.Node()
		if n.Status != structs.NodeStatusReady {
			return false, fmt.Errorf("node not ready: %s", n.Status)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

func TestClient_SaveRestoreState(t *testing.T) {
	ctestutil.ExecCompatible(t)
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.DevMode = false
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait until the node is ready
	waitTilNodeReady(c1, t)

	// Create mock allocations
	job := mock.Job()
	alloc1 := mock.Alloc()
	alloc1.NodeID = c1.Node().ID
	alloc1.Job = job
	alloc1.JobID = job.ID
	alloc1.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
	task := alloc1.Job.TaskGroups[0].Tasks[0]
	task.Config["run_for"] = "10s"

	state := s1.State()
	if err := state.UpsertJob(100, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertAllocs(102, []*structs.Allocation{alloc1}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// The allocation should get registered and start running
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		ar := c1.allocs[alloc1.ID]
		c1.allocLock.RUnlock()
		if ar == nil {
			return false, fmt.Errorf("nil alloc runner")
		}
		if ar.Alloc().ClientStatus != structs.AllocClientStatusRunning {
			return false, fmt.Errorf("client status: got %v; want %v", ar.Alloc().ClientStatus, structs.AllocClientStatusRunning)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Shutting down the client saves its state
	if err := c1.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a new client that restores from the saved state
	logger := log.New(c1.config.LogOutput, "", log.LstdFlags)
	catalog := consul.NewMockCatalog(logger)
	mockService := newMockConsulServiceClient()
	mockService.logger = logger
	c2, err := NewClient(c1.config, catalog, mockService, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer c2.Shutdown()

	// Ensure the allocation is running
	testutil.WaitForResult(func() (bool, error) {
		c2.allocLock.RLock()
		ar := c2.allocs[alloc1.ID]
		c2.allocLock.RUnlock()
		status := ar.Alloc().ClientStatus
		alive := status == structs.AllocClientStatusRunning || status == structs.AllocClientStatusPending
		if !alive {
			return false, fmt.Errorf("incorrect client status: %#v", ar.Alloc())
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Destroy all the allocations
	for _, ar := range c2.getAllocRunners() {
		ar.Destroy()
	}

	for _, ar := range c2.getAllocRunners() {
		<-ar.WaitCh()
	}
}

func TestClient_Init(t *testing.T) {
	dir, err := ioutil.TempDir("", "nomad")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.RemoveAll(dir)
	allocDir := filepath.Join(dir, "alloc")

	client := &Client{
		config: &config.Config{
			AllocDir: allocDir,
		},
		logger: log.New(os.Stderr, "", log.LstdFlags),
	}
	if err := client.init(); err != nil {
		t.Fatalf("err: %s", err)
	}

	if _, err := os.Stat(allocDir); err != nil {
		t.Fatalf("err: %s", err)
	}
}

func TestClient_BlockedAllocations(t *testing.T) {
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait for the node to be ready
	state := s1.State()
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		out, err := state.NodeByID(ws, c1.Node().ID)
		if err != nil {
			return false, err
		}
		if out == nil || out.Status != structs.NodeStatusReady {
			return false, fmt.Errorf("bad node: %#v", out)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Add an allocation
	alloc := mock.Alloc()
	alloc.NodeID = c1.Node().ID
	alloc.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
	alloc.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
		"kill_after":  "1s",
		"run_for":     "100s",
		"exit_code":   0,
		"exit_signal": 0,
		"exit_err":    "",
	}

	if err := state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertAllocs(100, []*structs.Allocation{alloc}); err != nil {
		t.Fatal(err)
	}

	// Wait until the client downloads and starts the allocation
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		out, err := state.AllocByID(ws, alloc.ID)
		if err != nil {
			return false, err
		}
		if out == nil || out.ClientStatus != structs.AllocClientStatusRunning {
			return false, fmt.Errorf("bad alloc: %#v", out)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Add a new chained alloc
	alloc2 := alloc.Copy()
	alloc2.ID = structs.GenerateUUID()
	alloc2.Job = alloc.Job
	alloc2.JobID = alloc.JobID
	alloc2.PreviousAllocation = alloc.ID
	if err := state.UpsertAllocs(200, []*structs.Allocation{alloc2}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that the chained allocation is being tracked as blocked
	testutil.WaitForResult(func() (bool, error) {
		alloc, ok := c1.blockedAllocations[alloc2.PreviousAllocation]
		if ok && alloc.ID == alloc2.ID {
			return true, nil
		}
		return false, fmt.Errorf("no blocked allocations")
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Change the desired state of the parent alloc to stop
	alloc1 := alloc.Copy()
	alloc1.DesiredStatus = structs.AllocDesiredStatusStop
	if err := state.UpsertAllocs(300, []*structs.Allocation{alloc1}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that there are no blocked allocations
	testutil.WaitForResult(func() (bool, error) {
		_, ok := c1.blockedAllocations[alloc2.PreviousAllocation]
		if ok {
			return false, fmt.Errorf("blocked allocation still present")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Destroy all the allocations
	for _, ar := range c1.getAllocRunners() {
		ar.Destroy()
	}

	for _, ar := range c1.getAllocRunners() {
		<-ar.WaitCh()
	}
}
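
// The blocking mechanism above hinges on PreviousAllocation: when a new
// allocation names a still-running predecessor, the client parks it in
// blockedAllocations keyed by that predecessor's ID and releases it once the
// predecessor terminates. Sketched with the structs used in this test:
//
//	next := alloc.Copy()
//	next.ID = structs.GenerateUUID()
//	next.PreviousAllocation = alloc.ID // blocked until alloc stops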

func TestClient_UnarchiveAllocDir(t *testing.T) {
	dir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer os.RemoveAll(dir)

	if err := os.Mkdir(filepath.Join(dir, "foo"), 0777); err != nil {
		t.Fatalf("err: %v", err)
	}
	dirInfo, err := os.Stat(filepath.Join(dir, "foo"))
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	f, err := os.Create(filepath.Join(dir, "foo", "bar"))
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if _, err := f.WriteString("foo"); err != nil {
		t.Fatalf("err: %v", err)
	}
	if err := f.Chmod(0644); err != nil {
		t.Fatalf("err: %v", err)
	}
	fInfo, err := f.Stat()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	f.Close()

	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)

	walkFn := func(path string, fileInfo os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Ignore symlinks
		if fileInfo.Mode()&os.ModeSymlink != 0 {
			return nil
		}

		// Write a header for each entry. Only the base name is used, so the
		// archive is flat: both "foo" and "bar" land at the top level.
		hdr, err := tar.FileInfoHeader(fileInfo, "")
		if err != nil {
			return fmt.Errorf("error creating file header: %v", err)
		}
		hdr.Name = fileInfo.Name()
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}

		// If it's a directory we just write the header into the tar
		if fileInfo.IsDir() {
			return nil
		}

		// Write the file into the archive
		file, err := os.Open(path)
		if err != nil {
			return err
		}
		defer file.Close()

		if _, err := io.Copy(tw, file); err != nil {
			return err
		}

		return nil
	}

	if err := filepath.Walk(dir, walkFn); err != nil {
		t.Fatalf("err: %v", err)
	}
	tw.Close()

	dir1, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer os.RemoveAll(dir1)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = nil
	})
	defer c1.Shutdown()

	rc := ioutil.NopCloser(buf)

	c1.migratingAllocs["123"] = newMigrateAllocCtrl(mock.Alloc())
	if err := c1.unarchiveAllocDir(rc, "123", dir1); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure foo is present with the expected mode
	fi, err := os.Stat(filepath.Join(dir1, "foo"))
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if fi.Mode() != dirInfo.Mode() {
		t.Fatalf("mode: %v", fi.Mode())
	}

	// Ensure bar is present with the expected mode
	fi1, err := os.Stat(filepath.Join(dir1, "bar"))
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if fi1.Mode() != fInfo.Mode() {
		t.Fatalf("mode: %v", fi1.Mode())
	}
}