github.com/ncodes/nomad@v0.5.7-0.20170403112158-97adf4a74fb3/client/client_test.go

     1  package client
     2  
     3  import (
     4  	"archive/tar"
     5  	"bytes"
     6  	"fmt"
     7  	"io"
     8  	"io/ioutil"
     9  	"log"
    10  	"net"
    11  	"os"
    12  	"path/filepath"
    13  	"runtime"
    14  	"sync/atomic"
    15  	"testing"
    16  	"time"
    17  
    18  	memdb "github.com/hashicorp/go-memdb"
    19  	"github.com/ncodes/nomad/client/config"
    20  	"github.com/ncodes/nomad/command/agent/consul"
    21  	"github.com/ncodes/nomad/nomad"
    22  	"github.com/ncodes/nomad/nomad/mock"
    23  	"github.com/ncodes/nomad/nomad/structs"
    24  	"github.com/ncodes/nomad/testutil"
    25  	"github.com/mitchellh/hashstructure"
    26  
    27  	ctestutil "github.com/ncodes/nomad/client/testutil"
    28  )
    29  
    30  var (
    31  	nextPort uint32 = 16000
    32  
    33  	osExecDriverSupport = map[string]bool{
    34  		"linux": true,
    35  	}
    36  )
    37  
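        // getPort returns the next value of a package-level counter so that the
        // servers created by different tests do not collide on RPC or Serf ports.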
    38  func getPort() int {
    39  	return int(atomic.AddUint32(&nextPort, 1))
    40  }
    41  
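        // testServer starts an in-process Nomad server in dev mode on loopback
        // ports from getPort, with Serf and Raft timings tightened so the server
        // elects itself leader quickly. The optional callback may mutate the config
        // before the server is created; the server and its RPC address are returned.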
    42  func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string) {
    43  	f := false
    44  
    45  	// Setup the default settings
    46  	config := nomad.DefaultConfig()
    47  	config.VaultConfig.Enabled = &f
    48  	config.Build = "unittest"
    49  	config.DevMode = true
    50  	config.RPCAddr = &net.TCPAddr{
    51  		IP:   []byte{127, 0, 0, 1},
    52  		Port: getPort(),
    53  	}
    54  	config.NodeName = fmt.Sprintf("Node %d", config.RPCAddr.Port)
    55  
    56  	// Tighten the Serf timing
    57  	config.SerfConfig.MemberlistConfig.BindAddr = "127.0.0.1"
    58  	config.SerfConfig.MemberlistConfig.BindPort = getPort()
    59  	config.SerfConfig.MemberlistConfig.SuspicionMult = 2
    60  	config.SerfConfig.MemberlistConfig.RetransmitMult = 2
    61  	config.SerfConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
    62  	config.SerfConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
    63  	config.SerfConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond
    64  
    65  	// Tighten the Raft timing
    66  	config.RaftConfig.LeaderLeaseTimeout = 20 * time.Millisecond
    67  	config.RaftConfig.HeartbeatTimeout = 40 * time.Millisecond
    68  	config.RaftConfig.ElectionTimeout = 40 * time.Millisecond
    69  	config.RaftConfig.StartAsLeader = true
    70  	config.RaftTimeout = 500 * time.Millisecond
    71  
    72  	// Invoke the callback if any
    73  	if cb != nil {
    74  		cb(config)
    75  	}
    76  
    77  	shutdownCh := make(chan struct{})
    78  	logger := log.New(config.LogOutput, "", log.LstdFlags)
    79  	consulSyncer, err := consul.NewSyncer(config.ConsulConfig, shutdownCh, logger)
    80  	if err != nil {
    81  		t.Fatalf("err: %v", err)
    82  	}
    83  
    84  	// Create server
    85  	server, err := nomad.NewServer(config, consulSyncer, logger)
    86  	if err != nil {
    87  		t.Fatalf("err: %v", err)
    88  	}
    89  	return server, config.RPCAddr.String()
    90  }
    91  
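        // testClient builds a dev-mode Nomad client with Vault disabled and no disk
        // reservation. The optional callback can adjust the config before the client
        // is constructed, e.g. pointing c.Servers at a test server's RPC address or
        // wiring c.RPCHandler directly to an in-process server.
        //
        // A minimal usage sketch (mirroring TestClient_RPC below):
        //
        //	s, addr := testServer(t, nil)
        //	defer s.Shutdown()
        //	c := testClient(t, func(c *config.Config) { c.Servers = []string{addr} })
        //	defer c.Shutdown()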
    92  func testClient(t *testing.T, cb func(c *config.Config)) *Client {
    93  	f := false
    94  
    95  	conf := config.DefaultConfig()
    96  	conf.VaultConfig.Enabled = &f
    97  	conf.DevMode = true
    98  	conf.Node = &structs.Node{
    99  		Reserved: &structs.Resources{
   100  			DiskMB: 0,
   101  		},
   102  	}
   103  	if cb != nil {
   104  		cb(conf)
   105  	}
   106  
   107  	shutdownCh := make(chan struct{})
   108  	consulSyncer, err := consul.NewSyncer(conf.ConsulConfig, shutdownCh, log.New(os.Stderr, "", log.LstdFlags))
   109  	if err != nil {
   110  		t.Fatalf("err: %v", err)
   111  	}
   112  
   113  	logger := log.New(conf.LogOutput, "", log.LstdFlags)
   114  	client, err := NewClient(conf, consulSyncer, logger)
   115  	if err != nil {
   116  		t.Fatalf("err: %v", err)
   117  	}
   118  	return client
   119  }
   120  
   121  func TestClient_StartStop(t *testing.T) {
   122  	client := testClient(t, nil)
   123  	if err := client.Shutdown(); err != nil {
   124  		t.Fatalf("err: %v", err)
   125  	}
   126  }
   127  
   128  func TestClient_RPC(t *testing.T) {
   129  	s1, addr := testServer(t, nil)
   130  	defer s1.Shutdown()
   131  
   132  	c1 := testClient(t, func(c *config.Config) {
   133  		c.Servers = []string{addr}
   134  	})
   135  	defer c1.Shutdown()
   136  
   137  	// RPC should succeed
   138  	testutil.WaitForResult(func() (bool, error) {
   139  		var out struct{}
   140  		err := c1.RPC("Status.Ping", struct{}{}, &out)
   141  		return err == nil, err
   142  	}, func(err error) {
   143  		t.Fatalf("err: %v", err)
   144  	})
   145  }
   146  
   147  func TestClient_RPC_Passthrough(t *testing.T) {
   148  	s1, _ := testServer(t, nil)
   149  	defer s1.Shutdown()
   150  
   151  	c1 := testClient(t, func(c *config.Config) {
   152  		c.RPCHandler = s1
   153  	})
   154  	defer c1.Shutdown()
   155  
   156  	// RPC should succeed
   157  	testutil.WaitForResult(func() (bool, error) {
   158  		var out struct{}
   159  		err := c1.RPC("Status.Ping", struct{}{}, &out)
   160  		return err == nil, err
   161  	}, func(err error) {
   162  		t.Fatalf("err: %v", err)
   163  	})
   164  }
   165  
   166  func TestClient_Fingerprint(t *testing.T) {
   167  	c := testClient(t, nil)
   168  	defer c.Shutdown()
   169  
   170  	// Ensure kernel and arch are always present
   171  	node := c.Node()
   172  	if node.Attributes["kernel.name"] == "" {
   173  		t.Fatalf("missing kernel.name")
   174  	}
   175  	if node.Attributes["cpu.arch"] == "" {
   176  		t.Fatalf("missing cpu arch")
   177  	}
   178  }
   179  
   180  func TestClient_HasNodeChanged(t *testing.T) {
   181  	c := testClient(t, nil)
   182  	defer c.Shutdown()
   183  
   184  	node := c.Node()
   185  	attrHash, err := hashstructure.Hash(node.Attributes, nil)
   186  	if err != nil {
   187  		c.logger.Printf("[DEBUG] client: unable to calculate node attributes hash: %v", err)
   188  	}
   189  	// Calculate node meta map hash
   190  	metaHash, err := hashstructure.Hash(node.Meta, nil)
   191  	if err != nil {
   192  		c.logger.Printf("[DEBUG] client: unable to calculate node meta hash: %v", err)
   193  	}
   194  	if changed, _, _ := c.hasNodeChanged(attrHash, metaHash); changed {
   195  		t.Fatalf("Unexpected hash change.")
   196  	}
   197  
   198  	// Change node attribute
   199  	node.Attributes["arch"] = "xyz_86"
   200  	if changed, newAttrHash, _ := c.hasNodeChanged(attrHash, metaHash); !changed {
   201  		t.Fatalf("Expected hash change in attributes: %d vs %d", attrHash, newAttrHash)
   202  	}
   203  
   204  	// Change node meta map
   205  	node.Meta["foo"] = "bar"
   206  	if changed, _, newMetaHash := c.hasNodeChanged(attrHash, metaHash); !changed {
   207  		t.Fatalf("Expected hash change in meta map: %d vs %d", metaHash, newMetaHash)
   208  	}
   209  }
   210  
   211  func TestClient_Fingerprint_InWhitelist(t *testing.T) {
   212  	c := testClient(t, func(c *config.Config) {
   213  		if c.Options == nil {
   214  			c.Options = make(map[string]string)
   215  		}
   216  
   217  	// Weird spacing to test trimming. Whitelist several modules, including cpu.
   218  		c.Options["fingerprint.whitelist"] = "  arch, consul,cpu,env_aws,env_gce,host,memory,network,storage,foo,bar	"
   219  	})
   220  	defer c.Shutdown()
   221  
   222  	node := c.Node()
   223  	if node.Attributes["cpu.frequency"] == "" {
   224  		t.Fatalf("missing cpu fingerprint module")
   225  	}
   226  }
   227  
   228  func TestClient_Fingerprint_InBlacklist(t *testing.T) {
   229  	c := testClient(t, func(c *config.Config) {
   230  		if c.Options == nil {
   231  			c.Options = make(map[string]string)
   232  		}
   233  
   234  		// Weird spacing to test trimming. Blacklist cpu.
   235  		c.Options["fingerprint.blacklist"] = "  cpu	"
   236  	})
   237  	defer c.Shutdown()
   238  
   239  	node := c.Node()
   240  	if node.Attributes["cpu.frequency"] != "" {
   241  		t.Fatalf("cpu fingerprint module loaded despite blacklisting")
   242  	}
   243  }
   244  
   245  func TestClient_Fingerprint_OutOfWhitelist(t *testing.T) {
   246  	c := testClient(t, func(c *config.Config) {
   247  		if c.Options == nil {
   248  			c.Options = make(map[string]string)
   249  		}
   250  
   251  		c.Options["fingerprint.whitelist"] = "arch,consul,env_aws,env_gce,host,memory,network,storage,foo,bar"
   252  	})
   253  	defer c.Shutdown()
   254  
   255  	node := c.Node()
   256  	if node.Attributes["cpu.frequency"] != "" {
   257  		t.Fatalf("found cpu fingerprint module")
   258  	}
   259  }
   260  
   261  func TestClient_Fingerprint_WhitelistBlacklistCombination(t *testing.T) {
   262  	c := testClient(t, func(c *config.Config) {
   263  		if c.Options == nil {
   264  			c.Options = make(map[string]string)
   265  		}
   266  
   267  		// With both a whitelist and a blacklist, the result should be their set difference (arch, cpu)
   268  		c.Options["fingerprint.whitelist"] = "arch,memory,cpu"
   269  		c.Options["fingerprint.blacklist"] = "memory,nomad"
   270  	})
   271  	defer c.Shutdown()
   272  
   273  	node := c.Node()
   274  	// Check expected modules are present
   275  	if node.Attributes["cpu.frequency"] == "" {
   276  		t.Fatalf("missing cpu fingerprint module")
   277  	}
   278  	if node.Attributes["cpu.arch"] == "" {
   279  		t.Fatalf("missing arch fingerprint module")
   280  	}
   281  	// Check remainder _not_ present
   282  	if node.Attributes["memory.totalbytes"] != "" {
   283  		t.Fatalf("found memory fingerprint module")
   284  	}
   285  	if node.Attributes["nomad.version"] != "" {
   286  		t.Fatalf("found nomad fingerprint module")
   287  	}
   288  }
   289  
   290  func TestClient_Drivers(t *testing.T) {
   291  	c := testClient(t, nil)
   292  	defer c.Shutdown()
   293  
   294  	node := c.Node()
   295  	if node.Attributes["driver.exec"] == "" {
   296  		if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
   297  			t.Fatalf("missing exec driver")
   298  		} else {
   299  			t.Skipf("missing exec driver, no OS support")
   300  		}
   301  	}
   302  }
   303  
   304  func TestClient_Drivers_InWhitelist(t *testing.T) {
   305  	c := testClient(t, func(c *config.Config) {
   306  		if c.Options == nil {
   307  			c.Options = make(map[string]string)
   308  		}
   309  
   310  		// Weird spacing to test trimming
   311  		c.Options["driver.whitelist"] = "   exec ,  foo	"
   312  	})
   313  	defer c.Shutdown()
   314  
   315  	node := c.Node()
   316  	if node.Attributes["driver.exec"] == "" {
   317  		if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
   318  			t.Fatalf("missing exec driver")
   319  		} else {
   320  			t.Skipf("missing exec driver, no OS support")
   321  		}
   322  	}
   323  }
   324  
   325  func TestClient_Drivers_InBlacklist(t *testing.T) {
   326  	c := testClient(t, func(c *config.Config) {
   327  		if c.Options == nil {
   328  			c.Options = make(map[string]string)
   329  		}
   330  
   331  		// Weird spacing to test trimming
   332  		c.Options["driver.blacklist"] = "   exec ,  foo	"
   333  	})
   334  	defer c.Shutdown()
   335  
   336  	node := c.Node()
   337  	if node.Attributes["driver.exec"] != "" {
   338  		if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
   339  			t.Fatalf("exec driver loaded despite blacklist")
   340  		} else {
   341  			t.Skipf("exec driver present, but no OS support expected")
   342  		}
   343  	}
   344  }
   345  
   346  func TestClient_Drivers_OutOfWhitelist(t *testing.T) {
   347  	c := testClient(t, func(c *config.Config) {
   348  		if c.Options == nil {
   349  			c.Options = make(map[string]string)
   350  		}
   351  
   352  		c.Options["driver.whitelist"] = "foo,bar,baz"
   353  	})
   354  	defer c.Shutdown()
   355  
   356  	node := c.Node()
   357  	if node.Attributes["driver.exec"] != "" {
   358  		t.Fatalf("found exec driver")
   359  	}
   360  }
   361  
   362  func TestClient_Drivers_WhitelistBlacklistCombination(t *testing.T) {
   363  	c := testClient(t, func(c *config.Config) {
   364  		if c.Options == nil {
   365  			c.Options = make(map[string]string)
   366  		}
   367  
   368  		// Expected output is set difference (raw_exec)
   369  		c.Options["driver.whitelist"] = "raw_exec,exec"
   370  		c.Options["driver.blacklist"] = "exec"
   371  	})
   372  	defer c.Shutdown()
   373  
   374  	node := c.Node()
   375  	// Check expected present
   376  	if node.Attributes["driver.raw_exec"] == "" {
   377  		t.Fatalf("missing raw_exec driver")
   378  	}
   379  	// Check expected absent
   380  	if node.Attributes["driver.exec"] != "" {
   381  		t.Fatalf("exec driver loaded despite blacklist")
   382  	}
   383  }
   384  
   385  func TestClient_Register(t *testing.T) {
   386  	s1, _ := testServer(t, nil)
   387  	defer s1.Shutdown()
   388  	testutil.WaitForLeader(t, s1.RPC)
   389  
   390  	c1 := testClient(t, func(c *config.Config) {
   391  		c.RPCHandler = s1
   392  	})
   393  	defer c1.Shutdown()
   394  
   395  	req := structs.NodeSpecificRequest{
   396  		NodeID:       c1.Node().ID,
   397  		QueryOptions: structs.QueryOptions{Region: "global"},
   398  	}
   399  	var out structs.SingleNodeResponse
   400  
   401  	// Register should succeed
   402  	testutil.WaitForResult(func() (bool, error) {
   403  		err := s1.RPC("Node.GetNode", &req, &out)
   404  		if err != nil {
   405  			return false, err
   406  		}
   407  		if out.Node == nil {
   408  			return false, fmt.Errorf("missing reg")
   409  		}
   410  		return out.Node.ID == req.NodeID, nil
   411  	}, func(err error) {
   412  		t.Fatalf("err: %v", err)
   413  	})
   414  }
   415  
   416  func TestClient_Heartbeat(t *testing.T) {
   417  	s1, _ := testServer(t, func(c *nomad.Config) {
   418  		c.MinHeartbeatTTL = 50 * time.Millisecond // fast heartbeats so the node becomes ready quickly
   419  	})
   420  	defer s1.Shutdown()
   421  	testutil.WaitForLeader(t, s1.RPC)
   422  
   423  	c1 := testClient(t, func(c *config.Config) {
   424  		c.RPCHandler = s1
   425  	})
   426  	defer c1.Shutdown()
   427  
   428  	req := structs.NodeSpecificRequest{
   429  		NodeID:       c1.Node().ID,
   430  		QueryOptions: structs.QueryOptions{Region: "global"},
   431  	}
   432  	var out structs.SingleNodeResponse
   433  
   434  	// The node should be marked ready once its heartbeats are processed
   435  	testutil.WaitForResult(func() (bool, error) {
   436  		err := s1.RPC("Node.GetNode", &req, &out)
   437  		if err != nil {
   438  			return false, err
   439  		}
   440  		if out.Node == nil {
   441  			return false, fmt.Errorf("missing reg")
   442  		}
   443  		return out.Node.Status == structs.NodeStatusReady, nil
   444  	}, func(err error) {
   445  		t.Fatalf("err: %v", err)
   446  	})
   447  }
   448  
   449  func TestClient_UpdateAllocStatus(t *testing.T) {
   450  	s1, _ := testServer(t, nil)
   451  	defer s1.Shutdown()
   452  	testutil.WaitForLeader(t, s1.RPC)
   453  
   454  	c1 := testClient(t, func(c *config.Config) {
   455  		c.RPCHandler = s1
   456  	})
   457  	defer c1.Shutdown()
   458  
   459  	// Wait until the node is ready
   460  	waitTilNodeReady(c1, t)
   461  
   462  	job := mock.Job()
   463  	alloc := mock.Alloc()
   464  	alloc.NodeID = c1.Node().ID
   465  	alloc.Job = job
   466  	alloc.JobID = job.ID
   467  	originalStatus := "foo"
   468  	alloc.ClientStatus = originalStatus
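        	// The wait loop below asserts that the client replaces this placeholder status.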
   469  
   470  	// Insert at zero so they are pulled
   471  	state := s1.State()
   472  	if err := state.UpsertJob(0, job); err != nil {
   473  		t.Fatal(err)
   474  	}
   475  	if err := state.UpsertJobSummary(100, mock.JobSummary(alloc.JobID)); err != nil {
   476  		t.Fatal(err)
   477  	}
   478  	state.UpsertAllocs(101, []*structs.Allocation{alloc})
   479  
   480  	testutil.WaitForResult(func() (bool, error) {
   481  		ws := memdb.NewWatchSet()
   482  		out, err := state.AllocByID(ws, alloc.ID)
   483  		if err != nil {
   484  			return false, err
   485  		}
   486  		if out == nil {
   487  			return false, fmt.Errorf("no such alloc")
   488  		}
   489  		if out.ClientStatus == originalStatus {
   490  			return false, fmt.Errorf("Alloc client status not updated; got %v", out.ClientStatus)
   491  		}
   492  		return true, nil
   493  	}, func(err error) {
   494  		t.Fatalf("err: %v", err)
   495  	})
   496  }
   497  
   498  func TestClient_WatchAllocs(t *testing.T) {
   499  	ctestutil.ExecCompatible(t)
   500  	s1, _ := testServer(t, nil)
   501  	defer s1.Shutdown()
   502  	testutil.WaitForLeader(t, s1.RPC)
   503  
   504  	c1 := testClient(t, func(c *config.Config) {
   505  		c.RPCHandler = s1
   506  	})
   507  	defer c1.Shutdown()
   508  
   509  	// Wait til the node is ready
   510  	// Wait until the node is ready
   511  
   512  	// Create mock allocations
   513  	job := mock.Job()
   514  	alloc1 := mock.Alloc()
   515  	alloc1.JobID = job.ID
   516  	alloc1.Job = job
   517  	alloc1.NodeID = c1.Node().ID
   518  	alloc2 := mock.Alloc()
   519  	alloc2.NodeID = c1.Node().ID
   520  	alloc2.JobID = job.ID
   521  	alloc2.Job = job
   522  
   523  	// Insert the job, summary and allocations so the client pulls them
   524  	state := s1.State()
   525  	if err := state.UpsertJob(100, job); err != nil {
   526  		t.Fatal(err)
   527  	}
   528  	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
   529  		t.Fatal(err)
   530  	}
   531  	err := state.UpsertAllocs(102, []*structs.Allocation{alloc1, alloc2})
   532  	if err != nil {
   533  		t.Fatalf("err: %v", err)
   534  	}
   535  
   536  	// Both allocations should get registered
   537  	testutil.WaitForResult(func() (bool, error) {
   538  		c1.allocLock.RLock()
   539  		num := len(c1.allocs)
   540  		c1.allocLock.RUnlock()
   541  		return num == 2, nil
   542  	}, func(err error) {
   543  		t.Fatalf("err: %v", err)
   544  	})
   545  
   546  	// Delete one allocation
   547  	err = state.DeleteEval(103, nil, []string{alloc1.ID})
   548  	if err != nil {
   549  		t.Fatalf("err: %v", err)
   550  	}
   551  
   552  	// Update the other allocation. Have to make a copy because the allocs are
   553  	// shared in memory in the test and the modify index would be updated in the
   554  	// alloc runner.
   555  	alloc2_2 := new(structs.Allocation)
   556  	*alloc2_2 = *alloc2
   557  	alloc2_2.DesiredStatus = structs.AllocDesiredStatusStop
   558  	err = state.UpsertAllocs(104, []*structs.Allocation{alloc2_2})
   559  	if err != nil {
   560  		t.Fatalf("err: %v", err)
   561  	}
   562  
   563  	// One allocation should be de-registered
   564  	testutil.WaitForResult(func() (bool, error) {
   565  		c1.allocLock.RLock()
   566  		num := len(c1.allocs)
   567  		c1.allocLock.RUnlock()
   568  		return num == 1, nil
   569  	}, func(err error) {
   570  		t.Fatalf("err: %v", err)
   571  	})
   572  
   573  	// The other allocation should be updated
   574  	testutil.WaitForResult(func() (bool, error) {
   575  		c1.allocLock.RLock()
   576  		ar := c1.allocs[alloc2.ID]
   577  		c1.allocLock.RUnlock()
   578  		return ar.Alloc().DesiredStatus == structs.AllocDesiredStatusStop, nil
   579  	}, func(err error) {
   580  		t.Fatalf("err: %v", err)
   581  	})
   582  }
   583  
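        // waitTilNodeReady blocks until the client's node reports
        // structs.NodeStatusReady, failing the test if it does not within the wait window.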
   584  func waitTilNodeReady(client *Client, t *testing.T) {
   585  	testutil.WaitForResult(func() (bool, error) {
   586  		n := client.Node()
   587  		if n.Status != structs.NodeStatusReady {
   588  			return false, fmt.Errorf("node not ready: %v", n.Status)
   589  		}
   590  		return true, nil
   591  	}, func(err error) {
   592  		t.Fatalf("err: %v", err)
   593  	})
   594  }
   595  
   596  func TestClient_SaveRestoreState(t *testing.T) {
   597  	ctestutil.ExecCompatible(t)
   598  	s1, _ := testServer(t, nil)
   599  	defer s1.Shutdown()
   600  	testutil.WaitForLeader(t, s1.RPC)
   601  
   602  	c1 := testClient(t, func(c *config.Config) {
   603  		c.DevMode = false
   604  		c.RPCHandler = s1
   605  	})
   606  	defer c1.Shutdown()
   607  
   608  	// Wait til the node is ready
   609  	// Wait until the node is ready
   610  
   611  	// Create mock allocations
   612  	job := mock.Job()
   613  	alloc1 := mock.Alloc()
   614  	alloc1.NodeID = c1.Node().ID
   615  	alloc1.Job = job
   616  	alloc1.JobID = job.ID
   617  	alloc1.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
   618  	task := alloc1.Job.TaskGroups[0].Tasks[0]
   619  	task.Config["run_for"] = "10s"
   620  
   621  	state := s1.State()
   622  	if err := state.UpsertJob(100, job); err != nil {
   623  		t.Fatal(err)
   624  	}
   625  	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
   626  		t.Fatal(err)
   627  	}
   628  	if err := state.UpsertAllocs(102, []*structs.Allocation{alloc1}); err != nil {
   629  		t.Fatalf("err: %v", err)
   630  	}
   631  
   632  	// Allocations should get registered
   633  	testutil.WaitForResult(func() (bool, error) {
   634  		c1.allocLock.RLock()
   635  		ar := c1.allocs[alloc1.ID]
   636  		c1.allocLock.RUnlock()
   637  		if ar == nil {
   638  			return false, fmt.Errorf("nil alloc runner")
   639  		}
   640  		if ar.Alloc().ClientStatus != structs.AllocClientStatusRunning {
   641  			return false, fmt.Errorf("client status: got %v; want %v", ar.Alloc().ClientStatus, structs.AllocClientStatusRunning)
   642  		}
   643  		return true, nil
   644  	}, func(err error) {
   645  		t.Fatalf("err: %v", err)
   646  	})
   647  
   648  	// Shut down the client, which persists its state to disk
   649  	if err := c1.Shutdown(); err != nil {
   650  		t.Fatalf("err: %v", err)
   651  	}
   652  
   653  	// Create a new client
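        	// It reuses c1's config, so it reads the state c1 persisted on shutdown
        	// and should restore the alloc runner for alloc1.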
   654  	shutdownCh := make(chan struct{})
   655  	logger := log.New(c1.config.LogOutput, "", log.LstdFlags)
   656  	consulSyncer, err := consul.NewSyncer(c1.config.ConsulConfig, shutdownCh, logger)
   657  	if err != nil {
   658  		t.Fatalf("err: %v", err)
   659  	}
   660  
   661  	c2, err := NewClient(c1.config, consulSyncer, logger)
   662  	if err != nil {
   663  		t.Fatalf("err: %v", err)
   664  	}
   665  	defer c2.Shutdown()
   666  
   667  	// Ensure the allocation is running
   668  	testutil.WaitForResult(func() (bool, error) {
   669  		c2.allocLock.RLock()
   670  		ar := c2.allocs[alloc1.ID]
   671  		c2.allocLock.RUnlock()
   672  		status := ar.Alloc().ClientStatus
   673  		alive := status == structs.AllocClientStatusRunning || status == structs.AllocClientStatusPending
   674  		if !alive {
   675  			return false, fmt.Errorf("incorrect client status: %#v", ar.Alloc())
   676  		}
   677  		return true, nil
   678  	}, func(err error) {
   679  		t.Fatalf("err: %v", err)
   680  	})
   681  
   682  	// Destroy all the allocations
   683  	for _, ar := range c2.getAllocRunners() {
   684  		ar.Destroy()
   685  	}
   686  
   687  	for _, ar := range c2.getAllocRunners() {
   688  		<-ar.WaitCh()
   689  	}
   690  }
   691  
   692  func TestClient_Init(t *testing.T) {
   693  	dir, err := ioutil.TempDir("", "nomad")
   694  	if err != nil {
   695  		t.Fatalf("err: %s", err)
   696  	}
   697  	defer os.RemoveAll(dir)
   698  	allocDir := filepath.Join(dir, "alloc")
   699  
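        	// A minimally constructed client is enough: init should create the alloc dir.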
   700  	client := &Client{
   701  		config: &config.Config{
   702  			AllocDir: allocDir,
   703  		},
   704  		logger: log.New(os.Stderr, "", log.LstdFlags),
   705  	}
   706  	if err := client.init(); err != nil {
   707  		t.Fatalf("err: %s", err)
   708  	}
   709  
   710  	if _, err := os.Stat(allocDir); err != nil {
   711  		t.Fatalf("err: %s", err)
   712  	}
   713  }
   714  
   715  func TestClient_BlockedAllocations(t *testing.T) {
   716  	s1, _ := testServer(t, nil)
   717  	defer s1.Shutdown()
   718  	testutil.WaitForLeader(t, s1.RPC)
   719  
   720  	c1 := testClient(t, func(c *config.Config) {
   721  		c.RPCHandler = s1
   722  	})
   723  	defer c1.Shutdown()
   724  
   725  	// Wait for the node to be ready
   726  	state := s1.State()
   727  	testutil.WaitForResult(func() (bool, error) {
   728  		ws := memdb.NewWatchSet()
   729  		out, err := state.NodeByID(ws, c1.Node().ID)
   730  		if err != nil {
   731  			return false, err
   732  		}
   733  		if out == nil || out.Status != structs.NodeStatusReady {
   734  			return false, fmt.Errorf("bad node: %#v", out)
   735  		}
   736  		return true, nil
   737  	}, func(err error) {
   738  		t.Fatalf("err: %v", err)
   739  	})
   740  
   741  	// Add an allocation
   742  	alloc := mock.Alloc()
   743  	alloc.NodeID = c1.Node().ID
   744  	alloc.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
   745  	alloc.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   746  		"kill_after":  "1s",
   747  		"run_for":     "100s",
   748  		"exit_code":   0,
   749  		"exit_signal": 0,
   750  		"exit_err":    "",
   751  	}
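        	// run_for is long so the parent task keeps running; the chained allocation
        	// added below should therefore be tracked as blocked rather than started.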
   752  
   753  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
   754  	state.UpsertAllocs(100, []*structs.Allocation{alloc})
   755  
   756  	// Wait until the client downloads and starts the allocation
   757  	testutil.WaitForResult(func() (bool, error) {
   758  		ws := memdb.NewWatchSet()
   759  		out, err := state.AllocByID(ws, alloc.ID)
   760  		if err != nil {
   761  			return false, err
   762  		}
   763  		if out == nil || out.ClientStatus != structs.AllocClientStatusRunning {
   764  			return false, fmt.Errorf("bad alloc: %#v", out)
   765  		}
   766  		return true, nil
   767  	}, func(err error) {
   768  		t.Fatalf("err: %v", err)
   769  	})
   770  
   771  	// Add a new chained alloc
   772  	alloc2 := alloc.Copy()
   773  	alloc2.ID = structs.GenerateUUID()
   774  	alloc2.Job = alloc.Job
   775  	alloc2.JobID = alloc.JobID
   776  	alloc2.PreviousAllocation = alloc.ID
   777  	if err := state.UpsertAllocs(200, []*structs.Allocation{alloc2}); err != nil {
   778  		t.Fatalf("err: %v", err)
   779  	}
   780  
   781  	// Ensure that the chained allocation is being tracked as blocked
   782  	testutil.WaitForResult(func() (bool, error) {
   783  		alloc, ok := c1.blockedAllocations[alloc2.PreviousAllocation]
   784  		if ok && alloc.ID == alloc2.ID {
   785  			return true, nil
   786  		}
   787  		return false, fmt.Errorf("no blocked allocations")
   788  	}, func(err error) {
   789  		t.Fatalf("err: %v", err)
   790  	})
   791  
   792  	// Change the desired state of the parent alloc to stop
   793  	alloc1 := alloc.Copy()
   794  	alloc1.DesiredStatus = structs.AllocDesiredStatusStop
   795  	if err := state.UpsertAllocs(300, []*structs.Allocation{alloc1}); err != nil {
   796  		t.Fatalf("err: %v", err)
   797  	}
   798  
   799  	// Ensure that there are no blocked allocations
   800  	testutil.WaitForResult(func() (bool, error) {
   801  		_, ok := c1.blockedAllocations[alloc2.PreviousAllocation]
   802  		if ok {
   803  			return false, fmt.Errorf("blocked allocations present")
   804  		}
   805  		return true, nil
   806  	}, func(err error) {
   807  		t.Fatalf("err: %v", err)
   808  	})
   809  
   810  	// Destroy all the allocations
   811  	for _, ar := range c1.getAllocRunners() {
   812  		ar.Destroy()
   813  	}
   814  
   815  	for _, ar := range c1.getAllocRunners() {
   816  		<-ar.WaitCh()
   817  	}
   818  }
   819  
   820  func TestClient_UnarchiveAllocDir(t *testing.T) {
   821  	dir, err := ioutil.TempDir("", "")
   822  	if err != nil {
   823  		t.Fatalf("err: %v", err)
   824  	}
   825  	defer os.RemoveAll(dir)
   826  
   827  	if err := os.Mkdir(filepath.Join(dir, "foo"), 0777); err != nil {
   828  		t.Fatalf("err: %v", err)
   829  	}
   830  	dirInfo, err := os.Stat(filepath.Join(dir, "foo"))
   831  	if err != nil {
   832  		t.Fatalf("err: %v", err)
   833  	}
   834  	f, err := os.Create(filepath.Join(dir, "foo", "bar"))
   835  	if err != nil {
   836  		t.Fatalf("err: %v", err)
   837  	}
   838  	if _, err := f.WriteString("foo"); err != nil {
   839  		t.Fatalf("err: %v", err)
   840  	}
   841  	if err := f.Chmod(0644); err != nil {
   842  		t.Fatalf("err: %v", err)
   843  	}
   844  	fInfo, err := f.Stat()
   845  	if err != nil {
   846  		t.Fatalf("err: %v", err)
   847  	}
   848  	f.Close()
   849  
   850  	buf := new(bytes.Buffer)
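        	// Build an in-memory tar of the temp directory to stand in for the snapshot
        	// stream that unarchiveAllocDir would receive during an alloc dir migration.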
   851  	tw := tar.NewWriter(buf)
   852  
   853  	walkFn := func(path string, fileInfo os.FileInfo, err error) error {
   854  		// Ignore if the file is a symlink
   855  		if fileInfo.Mode()&os.ModeSymlink != 0 {
   856  			return nil
   857  		}
   858  
   859  		// Include the path of the file name relative to the alloc dir
   860  		// so that we can put the files in the right directories
   861  		hdr, err := tar.FileInfoHeader(fileInfo, "")
   862  		if err != nil {
   863  			return fmt.Errorf("error creating file header: %v", err)
   864  		}
   865  		hdr.Name = fileInfo.Name()
   866  		tw.WriteHeader(hdr)
   867  
   868  		// If it's a directory we just write the header into the tar
   869  		if fileInfo.IsDir() {
   870  			return nil
   871  		}
   872  
   873  		// Write the file into the archive
   874  		file, err := os.Open(path)
   875  		if err != nil {
   876  			return err
   877  		}
   878  		defer file.Close()
   879  
   880  		if _, err := io.Copy(tw, file); err != nil {
   881  			return err
   882  		}
   883  
   884  		return nil
   885  	}
   886  
   887  	if err := filepath.Walk(dir, walkFn); err != nil {
   888  		t.Fatalf("err: %v", err)
   889  	}
   890  	tw.Close()
   891  
   892  	dir1, err := ioutil.TempDir("", "")
   893  	if err != nil {
   894  		t.Fatalf("err: %v", err)
   895  	}
   896  	defer os.RemoveAll(dir1)
   897  
   898  	c1 := testClient(t, func(c *config.Config) {
   899  		c.RPCHandler = nil
   900  	})
   901  	defer c1.Shutdown()
   902  
   903  	rc := ioutil.NopCloser(buf)
   904  
   905  	c1.migratingAllocs["123"] = newMigrateAllocCtrl(mock.Alloc())
   906  	if err := c1.unarchiveAllocDir(rc, "123", dir1); err != nil {
   907  		t.Fatalf("err: %v", err)
   908  	}
   909  
   910  	// Ensure foo is present
   911  	fi, err := os.Stat(filepath.Join(dir1, "foo"))
   912  	if err != nil {
   913  		t.Fatalf("err: %v", err)
   914  	}
   915  	if fi.Mode() != dirInfo.Mode() {
   916  		t.Fatalf("mode: %v", fi.Mode())
   917  	}
   918  
   919  	fi1, err := os.Stat(filepath.Join(dir1, "bar"))
   920  	if err != nil {
   921  		t.Fatalf("err: %v", err)
   922  	}
   923  	if fi1.Mode() != fInfo.Mode() {
   924  		t.Fatalf("mode: %v", fi1.Mode())
   925  	}
   926  }