github.com/maier/nomad@v0.4.1-0.20161110003312-a9e3d0b8549d/client/client_test.go

package client

import (
	"fmt"
	"io/ioutil"
	"log"
	"net"
	"os"
	"path/filepath"
	"runtime"
	"sync/atomic"
	"testing"
	"time"

	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/command/agent/consul"
	"github.com/hashicorp/nomad/nomad"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/mitchellh/hashstructure"

	ctestutil "github.com/hashicorp/nomad/client/testutil"
)

var (
	nextPort uint32 = 16000

	osExecDriverSupport = map[string]bool{
		"linux": true,
	}
)

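// getPort returns the next port from the shared counter. The atomic
// increment keeps concurrently running tests in this package from being
// handed the same port twice.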
func getPort() int {
	return int(atomic.AddUint32(&nextPort, 1))
}

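// testServer starts an in-process Nomad server for testing. Serf and Raft
// timing is tightened so a single-node cluster elects a leader quickly, and
// cb, if non-nil, may mutate the config before the server is created, e.g.:
//
//	s1, addr := testServer(t, func(c *nomad.Config) {
//		c.MinHeartbeatTTL = 50 * time.Millisecond
//	})
//	defer s1.Shutdown()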
func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string) {
	f := false

	// Set up the default settings
	config := nomad.DefaultConfig()
	config.VaultConfig.Enabled = &f
	config.Build = "unittest"
	config.DevMode = true
	config.RPCAddr = &net.TCPAddr{
		IP:   []byte{127, 0, 0, 1},
		Port: getPort(),
	}
	config.NodeName = fmt.Sprintf("Node %d", config.RPCAddr.Port)

	// Tighten the Serf timing
	config.SerfConfig.MemberlistConfig.BindAddr = "127.0.0.1"
	config.SerfConfig.MemberlistConfig.BindPort = getPort()
	config.SerfConfig.MemberlistConfig.SuspicionMult = 2
	config.SerfConfig.MemberlistConfig.RetransmitMult = 2
	config.SerfConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
	config.SerfConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
	config.SerfConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond

	// Tighten the Raft timing
	config.RaftConfig.LeaderLeaseTimeout = 20 * time.Millisecond
	config.RaftConfig.HeartbeatTimeout = 40 * time.Millisecond
	config.RaftConfig.ElectionTimeout = 40 * time.Millisecond
	config.RaftConfig.StartAsLeader = true
	config.RaftTimeout = 500 * time.Millisecond

	// Invoke the callback if any
	if cb != nil {
		cb(config)
	}

	shutdownCh := make(chan struct{})
	logger := log.New(config.LogOutput, "", log.LstdFlags)
	consulSyncer, err := consul.NewSyncer(config.ConsulConfig, shutdownCh, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create the server
	server, err := nomad.NewServer(config, consulSyncer, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return server, config.RPCAddr.String()
}

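// testClient builds a Nomad client in dev mode with Vault disabled. cb, if
// non-nil, may mutate the config first, for example to point the client at
// a test server:
//
//	c1 := testClient(t, func(c *config.Config) {
//		c.Servers = []string{addr}
//	})
//	defer c1.Shutdown()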
func testClient(t *testing.T, cb func(c *config.Config)) *Client {
	f := false

	conf := config.DefaultConfig()
	conf.VaultConfig.Enabled = &f
	conf.DevMode = true
	if cb != nil {
		cb(conf)
	}

	shutdownCh := make(chan struct{})
	consulSyncer, err := consul.NewSyncer(conf.ConsulConfig, shutdownCh, log.New(os.Stderr, "", log.LstdFlags))
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	logger := log.New(conf.LogOutput, "", log.LstdFlags)
	client, err := NewClient(conf, consulSyncer, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return client
}

func TestClient_StartStop(t *testing.T) {
	client := testClient(t, nil)
	if err := client.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}
}

func TestClient_RPC(t *testing.T) {
	s1, addr := testServer(t, nil)
	defer s1.Shutdown()

	c1 := testClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
	})
	defer c1.Shutdown()

	// RPC should succeed
	testutil.WaitForResult(func() (bool, error) {
		var out struct{}
		err := c1.RPC("Status.Ping", struct{}{}, &out)
		return err == nil, err
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

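// Setting config.RPCHandler lets the client invoke the server's RPC
// handler in-process rather than dialing it over the network.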
func TestClient_RPC_Passthrough(t *testing.T) {
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// RPC should succeed
	testutil.WaitForResult(func() (bool, error) {
		var out struct{}
		err := c1.RPC("Status.Ping", struct{}{}, &out)
		return err == nil, err
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

func TestClient_Fingerprint(t *testing.T) {
	c := testClient(t, nil)
	defer c.Shutdown()

	// Ensure kernel and arch are always present
	node := c.Node()
	if node.Attributes["kernel.name"] == "" {
		t.Fatalf("missing kernel.name")
	}
	if node.Attributes["arch"] == "" {
		t.Fatalf("missing arch")
	}
}

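// TestClient_HasNodeChanged exercises the client's hash-based change
// detection: matching attribute and meta hashes must report no change, and
// mutating either map must report one.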
func TestClient_HasNodeChanged(t *testing.T) {
	c := testClient(t, nil)
	defer c.Shutdown()

	node := c.Node()
	attrHash, err := hashstructure.Hash(node.Attributes, nil)
	if err != nil {
		c.logger.Printf("[DEBUG] client: unable to calculate node attributes hash: %v", err)
	}
	// Calculate node meta map hash
	metaHash, err := hashstructure.Hash(node.Meta, nil)
	if err != nil {
		c.logger.Printf("[DEBUG] client: unable to calculate node meta hash: %v", err)
	}
	if changed, _, _ := c.hasNodeChanged(attrHash, metaHash); changed {
		t.Fatalf("Unexpected hash change.")
	}

	// Change node attribute
	node.Attributes["arch"] = "xyz_86"
	if changed, newAttrHash, _ := c.hasNodeChanged(attrHash, metaHash); !changed {
		t.Fatalf("Expected hash change in attributes: %d vs %d", attrHash, newAttrHash)
	}

	// Change node meta map
	node.Meta["foo"] = "bar"
	if changed, _, newMetaHash := c.hasNodeChanged(attrHash, metaHash); !changed {
		t.Fatalf("Expected hash change in meta map: %d vs %d", metaHash, newMetaHash)
	}
}

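// The fingerprint.whitelist and fingerprint.blacklist client options gate
// which fingerprint modules run; the next four tests cover each
// combination.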
func TestClient_Fingerprint_InWhitelist(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		// Weird spacing to test trimming. Whitelist all modules, cpu included.
		c.Options["fingerprint.whitelist"] = "  arch, consul,cpu,env_aws,env_gce,host,memory,network,storage,foo,bar	"
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["cpu.frequency"] == "" {
		t.Fatalf("missing cpu fingerprint module")
	}
}

func TestClient_Fingerprint_InBlacklist(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		// Weird spacing to test trimming. Blacklist cpu.
		c.Options["fingerprint.blacklist"] = "  cpu	"
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["cpu.frequency"] != "" {
		t.Fatalf("cpu fingerprint module loaded despite blacklisting")
	}
}

func TestClient_Fingerprint_OutOfWhitelist(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		c.Options["fingerprint.whitelist"] = "arch,consul,env_aws,env_gce,host,memory,network,storage,foo,bar"
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["cpu.frequency"] != "" {
		t.Fatalf("found cpu fingerprint module")
	}
}

func TestClient_Fingerprint_WhitelistBlacklistCombination(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		// With both a whitelist and a blacklist, the loaded modules should
		// be the set difference (arch, cpu)
		c.Options["fingerprint.whitelist"] = "arch,memory,cpu"
		c.Options["fingerprint.blacklist"] = "memory,nomad"
	})
	defer c.Shutdown()

	node := c.Node()
	// Check the expected modules are present
	if node.Attributes["cpu.frequency"] == "" {
		t.Fatalf("missing cpu fingerprint module")
	}
	if node.Attributes["arch"] == "" {
		t.Fatalf("missing arch fingerprint module")
	}
	// Check the remainder are _not_ present
	if node.Attributes["memory.totalbytes"] != "" {
		t.Fatalf("found memory fingerprint module")
	}
	if node.Attributes["nomad.version"] != "" {
		t.Fatalf("found nomad fingerprint module")
	}
}

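// The driver tests below mirror the fingerprint tests, using the
// driver.whitelist and driver.blacklist options. Assertions about the exec
// driver only fail on operating systems that support it (see
// osExecDriverSupport); elsewhere they skip.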
func TestClient_Drivers(t *testing.T) {
	c := testClient(t, nil)
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["driver.exec"] == "" {
		if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
			t.Fatalf("missing exec driver")
		} else {
			t.Skipf("missing exec driver, no OS support")
		}
	}
}

func TestClient_Drivers_InWhitelist(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		// Weird spacing to test trimming
		c.Options["driver.whitelist"] = "   exec ,  foo	"
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["driver.exec"] == "" {
		if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
			t.Fatalf("missing exec driver")
		} else {
			t.Skipf("missing exec driver, no OS support")
		}
	}
}

func TestClient_Drivers_InBlacklist(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		// Weird spacing to test trimming
		c.Options["driver.blacklist"] = "   exec ,  foo	"
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["driver.exec"] != "" {
		if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
			t.Fatalf("exec driver loaded despite blacklist")
		} else {
			t.Skipf("exec driver not supported on this OS")
		}
	}
}

func TestClient_Drivers_OutOfWhitelist(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		c.Options["driver.whitelist"] = "foo,bar,baz"
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["driver.exec"] != "" {
		t.Fatalf("found exec driver")
	}
}

func TestClient_Drivers_WhitelistBlacklistCombination(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		// Expected result is the set difference (raw_exec)
		c.Options["driver.whitelist"] = "raw_exec,exec"
		c.Options["driver.blacklist"] = "exec"
	})
	defer c.Shutdown()

	node := c.Node()
	// Check the expected driver is present
	if node.Attributes["driver.raw_exec"] == "" {
		t.Fatalf("missing raw_exec driver")
	}
	// Check the expected driver is absent
	if node.Attributes["driver.exec"] != "" {
		t.Fatalf("exec driver loaded despite blacklist")
	}
}

func TestClient_Register(t *testing.T) {
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse

	// Register should succeed
	testutil.WaitForResult(func() (bool, error) {
		err := s1.RPC("Node.GetNode", &req, &out)
		if err != nil {
			return false, err
		}
		if out.Node == nil {
			return false, fmt.Errorf("missing reg")
		}
		return out.Node.ID == req.NodeID, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

func TestClient_Heartbeat(t *testing.T) {
	s1, _ := testServer(t, func(c *nomad.Config) {
		c.MinHeartbeatTTL = 50 * time.Millisecond
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse

	// The node should register and become ready
	testutil.WaitForResult(func() (bool, error) {
		err := s1.RPC("Node.GetNode", &req, &out)
		if err != nil {
			return false, err
		}
		if out.Node == nil {
			return false, fmt.Errorf("missing reg")
		}
		return out.Node.Status == structs.NodeStatusReady, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

func TestClient_UpdateAllocStatus(t *testing.T) {
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait until the node is ready
	waitTilNodeReady(c1, t)

	job := mock.Job()
	alloc := mock.Alloc()
	alloc.NodeID = c1.Node().ID
	alloc.Job = job
	alloc.JobID = job.ID
	originalStatus := "foo"
	alloc.ClientStatus = originalStatus

	// Insert the job at index zero so it is pulled
	state := s1.State()
	if err := state.UpsertJob(0, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(100, mock.JobSummary(alloc.JobID)); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertAllocs(101, []*structs.Allocation{alloc}); err != nil {
		t.Fatal(err)
	}

	testutil.WaitForResult(func() (bool, error) {
		out, err := state.AllocByID(alloc.ID)
		if err != nil {
			return false, err
		}
		if out == nil {
			return false, fmt.Errorf("no such alloc")
		}
		if out.ClientStatus == originalStatus {
			return false, fmt.Errorf("alloc client status not updated; got %v", out.ClientStatus)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

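// TestClient_WatchAllocs drives the client's allocation watcher end to
// end: new allocations must be picked up, deleted ones dropped, and
// updates to existing ones applied.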
func TestClient_WatchAllocs(t *testing.T) {
	ctestutil.ExecCompatible(t)
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait until the node is ready
	waitTilNodeReady(c1, t)

	// Create mock allocations
	job := mock.Job()
	alloc1 := mock.Alloc()
	alloc1.JobID = job.ID
	alloc1.Job = job
	alloc1.NodeID = c1.Node().ID
	alloc2 := mock.Alloc()
	alloc2.NodeID = c1.Node().ID
	alloc2.JobID = job.ID
	alloc2.Job = job

	// Insert the job and allocations so the client pulls them
	state := s1.State()
	if err := state.UpsertJob(100, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
		t.Fatal(err)
	}
	err := state.UpsertAllocs(102, []*structs.Allocation{alloc1, alloc2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Both allocations should get registered
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		num := len(c1.allocs)
		c1.allocLock.RUnlock()
		return num == 2, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Delete one allocation
	err = state.DeleteEval(103, nil, []string{alloc1.ID})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Update the other allocation. Have to make a copy because the allocs
	// are shared in memory in the test and the modify index would be
	// updated in the alloc runner.
	alloc2_2 := new(structs.Allocation)
	*alloc2_2 = *alloc2
	alloc2_2.DesiredStatus = structs.AllocDesiredStatusStop
	err = state.UpsertAllocs(104, []*structs.Allocation{alloc2_2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// One allocation should get de-registered
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		num := len(c1.allocs)
		c1.allocLock.RUnlock()
		return num == 1, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// The other allocation should get updated
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		ar := c1.allocs[alloc2.ID]
		c1.allocLock.RUnlock()
		return ar.Alloc().DesiredStatus == structs.AllocDesiredStatusStop, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

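// waitTilNodeReady blocks until the client's node reports the ready
// status, failing the test if it never does.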
func waitTilNodeReady(client *Client, t *testing.T) {
	testutil.WaitForResult(func() (bool, error) {
		n := client.Node()
		if n.Status != structs.NodeStatusReady {
			return false, fmt.Errorf("node not registered")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

func TestClient_SaveRestoreState(t *testing.T) {
	ctestutil.ExecCompatible(t)
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.DevMode = false
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait until the node is ready
	waitTilNodeReady(c1, t)

	// Create mock allocations
	job := mock.Job()
	alloc1 := mock.Alloc()
	alloc1.NodeID = c1.Node().ID
	alloc1.Job = job
	alloc1.JobID = job.ID
	alloc1.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
	task := alloc1.Job.TaskGroups[0].Tasks[0]
	task.Config["run_for"] = "10s"

	state := s1.State()
	if err := state.UpsertJob(100, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertAllocs(102, []*structs.Allocation{alloc1}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// The allocation should get registered and start running
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		ar := c1.allocs[alloc1.ID]
		c1.allocLock.RUnlock()
		if ar == nil {
			return false, fmt.Errorf("nil alloc runner")
		}
		if ar.Alloc().ClientStatus != structs.AllocClientStatusRunning {
			return false, fmt.Errorf("client status: got %v; want %v", ar.Alloc().ClientStatus, structs.AllocClientStatusRunning)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Shut down the client; this persists its state
	if err := c1.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a new client
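	// Reusing c1.config points the new client at the same state and alloc
	// directories (DevMode is false above), so it should restore the
	// still-running allocation.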
	shutdownCh := make(chan struct{})
	logger := log.New(c1.config.LogOutput, "", log.LstdFlags)
	consulSyncer, err := consul.NewSyncer(c1.config.ConsulConfig, shutdownCh, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	c2, err := NewClient(c1.config, consulSyncer, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer c2.Shutdown()

	// Ensure the allocation is running
	testutil.WaitForResult(func() (bool, error) {
		c2.allocLock.RLock()
		ar := c2.allocs[alloc1.ID]
		c2.allocLock.RUnlock()
		status := ar.Alloc().ClientStatus
		alive := status == structs.AllocClientStatusRunning ||
			status == structs.AllocClientStatusPending
		if !alive {
			return false, fmt.Errorf("incorrect client status: %#v", ar.Alloc())
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Destroy all the allocations
	c2.allocLock.Lock()
	for _, ar := range c2.allocs {
		ar.Destroy()
		<-ar.WaitCh()
	}
	c2.allocLock.Unlock()
}

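// TestClient_Init checks that client.init() creates the configured
// allocation directory.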
func TestClient_Init(t *testing.T) {
	dir, err := ioutil.TempDir("", "nomad")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.RemoveAll(dir)
	allocDir := filepath.Join(dir, "alloc")

	client := &Client{
		config: &config.Config{
			AllocDir: allocDir,
		},
		logger: log.New(os.Stderr, "", log.LstdFlags),
	}
	if err := client.init(); err != nil {
		t.Fatalf("err: %s", err)
	}

	if _, err := os.Stat(allocDir); err != nil {
		t.Fatalf("err: %s", err)
	}
}

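// TestClient_BlockedAllocations covers chained allocations: an allocation
// whose PreviousAllocation is still running on the node must be tracked as
// blocked until its predecessor stops.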
func TestClient_BlockedAllocations(t *testing.T) {
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait for the node to be ready
	state := s1.State()
	testutil.WaitForResult(func() (bool, error) {
		out, err := state.NodeByID(c1.Node().ID)
		if err != nil {
			return false, err
		}
		if out == nil || out.Status != structs.NodeStatusReady {
			return false, fmt.Errorf("bad node: %#v", out)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Add an allocation
	alloc := mock.Alloc()
	alloc.NodeID = c1.Node().ID
	alloc.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
	alloc.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
		"kill_after":  "1s",
		"run_for":     "100s",
		"exit_code":   0,
		"exit_signal": 0,
		"exit_err":    "",
	}

	if err := state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertAllocs(100, []*structs.Allocation{alloc}); err != nil {
		t.Fatal(err)
	}

	// Wait until the client downloads and starts the allocation
	testutil.WaitForResult(func() (bool, error) {
		out, err := state.AllocByID(alloc.ID)
		if err != nil {
			return false, err
		}
		if out == nil || out.ClientStatus != structs.AllocClientStatusRunning {
			return false, fmt.Errorf("bad alloc: %#v", out)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Add a new chained alloc
	alloc2 := alloc.Copy()
	alloc2.ID = structs.GenerateUUID()
	alloc2.Job = alloc.Job
	alloc2.JobID = alloc.JobID
	alloc2.PreviousAllocation = alloc.ID
	if err := state.UpsertAllocs(200, []*structs.Allocation{alloc2}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that the chained allocation is being tracked as blocked
	testutil.WaitForResult(func() (bool, error) {
		alloc, ok := c1.blockedAllocations[alloc2.PreviousAllocation]
		if ok && alloc.ID == alloc2.ID {
			return true, nil
		}
		return false, fmt.Errorf("no blocked allocations")
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Change the desired state of the parent alloc to stop
	alloc1 := alloc.Copy()
	alloc1.DesiredStatus = structs.AllocDesiredStatusStop
	if err := state.UpsertAllocs(300, []*structs.Allocation{alloc1}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that there are no blocked allocations
	testutil.WaitForResult(func() (bool, error) {
		_, ok := c1.blockedAllocations[alloc2.PreviousAllocation]
		if ok {
			return false, fmt.Errorf("blocked allocations still present")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Destroy all the allocations
	c1.allocLock.Lock()
	for _, ar := range c1.allocs {
		ar.Destroy()
		<-ar.WaitCh()
	}
	c1.allocLock.Unlock()
}