github.com/taylorchu/nomad@v0.5.3-rc1.0.20170407200202-db11e7dd7b55/client/client_test.go

     1  package client
     2  
     3  import (
     4  	"archive/tar"
     5  	"bytes"
     6  	"fmt"
     7  	"io"
     8  	"io/ioutil"
     9  	"log"
    10  	"net"
    11  	"os"
    12  	"path/filepath"
    13  	"runtime"
    14  	"sync/atomic"
    15  	"testing"
    16  	"time"
    17  
    18  	memdb "github.com/hashicorp/go-memdb"
    19  	"github.com/hashicorp/nomad/client/config"
    20  	"github.com/hashicorp/nomad/command/agent/consul"
    21  	"github.com/hashicorp/nomad/nomad"
    22  	"github.com/hashicorp/nomad/nomad/mock"
    23  	"github.com/hashicorp/nomad/nomad/structs"
    24  	nconfig "github.com/hashicorp/nomad/nomad/structs/config"
    25  	"github.com/hashicorp/nomad/testutil"
    26  	"github.com/mitchellh/hashstructure"
    27  
    28  	ctestutil "github.com/hashicorp/nomad/client/testutil"
    29  )
    30  
    31  var (
    32  	nextPort uint32 = 16000
    33  
    34  	osExecDriverSupport = map[string]bool{
    35  		"linux": true,
    36  	}
    37  )
    38  
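        // getPort returns the next port from an atomically incremented counter so
        // that concurrently started test servers and clients do not collide.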
    39  func getPort() int {
    40  	return int(atomic.AddUint32(&nextPort, 1))
    41  }
    42  
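        // testServer starts an in-process Nomad server in dev mode with Vault
        // disabled and tightened Serf/Raft timing. The optional callback may mutate
        // the config before the server is created; the server and its RPC address
        // are returned.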
    43  func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string) {
    44  	f := false
    45  
    46  	// Set up the default settings
    47  	config := nomad.DefaultConfig()
    48  	config.VaultConfig.Enabled = &f
    49  	config.Build = "unittest"
    50  	config.DevMode = true
    51  	config.RPCAddr = &net.TCPAddr{
    52  		IP:   []byte{127, 0, 0, 1},
    53  		Port: getPort(),
    54  	}
    55  	config.NodeName = fmt.Sprintf("Node %d", config.RPCAddr.Port)
    56  
    57  	// Tighten the Serf timing
    58  	config.SerfConfig.MemberlistConfig.BindAddr = "127.0.0.1"
    59  	config.SerfConfig.MemberlistConfig.BindPort = getPort()
    60  	config.SerfConfig.MemberlistConfig.SuspicionMult = 2
    61  	config.SerfConfig.MemberlistConfig.RetransmitMult = 2
    62  	config.SerfConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
    63  	config.SerfConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
    64  	config.SerfConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond
    65  
    66  	// Tighten the Raft timing
    67  	config.RaftConfig.LeaderLeaseTimeout = 20 * time.Millisecond
    68  	config.RaftConfig.HeartbeatTimeout = 40 * time.Millisecond
    69  	config.RaftConfig.ElectionTimeout = 40 * time.Millisecond
    70  	config.RaftConfig.StartAsLeader = true
    71  	config.RaftTimeout = 500 * time.Millisecond
    72  
    73  	// Invoke the callback if any
    74  	if cb != nil {
    75  		cb(config)
    76  	}
    77  
    78  	shutdownCh := make(chan struct{})
    79  	logger := log.New(config.LogOutput, "", log.LstdFlags)
    80  	consulSyncer, err := consul.NewSyncer(config.ConsulConfig, shutdownCh, logger)
    81  	if err != nil {
    82  		t.Fatalf("err: %v", err)
    83  	}
    84  
    85  	// Create server
    86  	server, err := nomad.NewServer(config, consulSyncer, logger)
    87  	if err != nil {
    88  		t.Fatalf("err: %v", err)
    89  	}
    90  	return server, config.RPCAddr.String()
    91  }
    92  
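        // testClient builds a dev-mode client with Vault disabled and no reserved
        // disk. The optional callback may mutate the config before the client is
        // created.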
    93  func testClient(t *testing.T, cb func(c *config.Config)) *Client {
    94  	f := false
    95  
    96  	conf := config.DefaultConfig()
    97  	conf.VaultConfig.Enabled = &f
    98  	conf.DevMode = true
    99  	conf.Node = &structs.Node{
   100  		Reserved: &structs.Resources{
   101  			DiskMB: 0,
   102  		},
   103  	}
   104  	if cb != nil {
   105  		cb(conf)
   106  	}
   107  
   108  	shutdownCh := make(chan struct{})
   109  	consulSyncer, err := consul.NewSyncer(conf.ConsulConfig, shutdownCh, log.New(os.Stderr, "", log.LstdFlags))
   110  	if err != nil {
   111  		t.Fatalf("err: %v", err)
   112  	}
   113  
   114  	logger := log.New(conf.LogOutput, "", log.LstdFlags)
   115  	client, err := NewClient(conf, consulSyncer, logger)
   116  	if err != nil {
   117  		t.Fatalf("err: %v", err)
   118  	}
   119  	return client
   120  }
   121  
   122  func TestClient_StartStop(t *testing.T) {
   123  	client := testClient(t, nil)
   124  	if err := client.Shutdown(); err != nil {
   125  		t.Fatalf("err: %v", err)
   126  	}
   127  }
   128  
   129  func TestClient_RPC(t *testing.T) {
   130  	s1, addr := testServer(t, nil)
   131  	defer s1.Shutdown()
   132  
   133  	c1 := testClient(t, func(c *config.Config) {
   134  		c.Servers = []string{addr}
   135  	})
   136  	defer c1.Shutdown()
   137  
   138  	// RPC should succeed
   139  	testutil.WaitForResult(func() (bool, error) {
   140  		var out struct{}
   141  		err := c1.RPC("Status.Ping", struct{}{}, &out)
   142  		return err == nil, err
   143  	}, func(err error) {
   144  		t.Fatalf("err: %v", err)
   145  	})
   146  }
   147  
   148  func TestClient_RPC_Passthrough(t *testing.T) {
   149  	s1, _ := testServer(t, nil)
   150  	defer s1.Shutdown()
   151  
   152  	c1 := testClient(t, func(c *config.Config) {
   153  		c.RPCHandler = s1
   154  	})
   155  	defer c1.Shutdown()
   156  
   157  	// RPC should succeed
   158  	testutil.WaitForResult(func() (bool, error) {
   159  		var out struct{}
   160  		err := c1.RPC("Status.Ping", struct{}{}, &out)
   161  		return err == nil, err
   162  	}, func(err error) {
   163  		t.Fatalf("err: %v", err)
   164  	})
   165  }
   166  
   167  func TestClient_Fingerprint(t *testing.T) {
   168  	c := testClient(t, nil)
   169  	defer c.Shutdown()
   170  
   171  	// Ensure kernel and arch are always present
   172  	node := c.Node()
   173  	if node.Attributes["kernel.name"] == "" {
   174  		t.Fatalf("missing kernel.name")
   175  	}
   176  	if node.Attributes["cpu.arch"] == "" {
   177  		t.Fatalf("missing cpu arch")
   178  	}
   179  }
   180  
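        // TestClient_HasNodeChanged verifies that hasNodeChanged reports a change
        // only after the node's attributes or meta map are modified.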
   181  func TestClient_HasNodeChanged(t *testing.T) {
   182  	c := testClient(t, nil)
   183  	defer c.Shutdown()
   184  
   185  	node := c.Node()
   186  	attrHash, err := hashstructure.Hash(node.Attributes, nil)
   187  	if err != nil {
   188  		c.logger.Printf("[DEBUG] client: unable to calculate node attributes hash: %v", err)
   189  	}
   190  	// Calculate node meta map hash
   191  	metaHash, err := hashstructure.Hash(node.Meta, nil)
   192  	if err != nil {
   193  		c.logger.Printf("[DEBUG] client: unable to calculate node meta hash: %v", err)
   194  	}
   195  	if changed, _, _ := c.hasNodeChanged(attrHash, metaHash); changed {
   196  		t.Fatalf("Unexpected hash change.")
   197  	}
   198  
   199  	// Change node attribute
   200  	node.Attributes["arch"] = "xyz_86"
   201  	if changed, newAttrHash, _ := c.hasNodeChanged(attrHash, metaHash); !changed {
   202  		t.Fatalf("Expected hash change in attributes: %d vs %d", attrHash, newAttrHash)
   203  	}
   204  
   205  	// Change node meta map
   206  	node.Meta["foo"] = "bar"
   207  	if changed, _, newMetaHash := c.hasNodeChanged(attrHash, metaHash); !changed {
   208  		t.Fatalf("Expected hash change in meta map: %d vs %d", metaHash, newMetaHash)
   209  	}
   210  }
   211  
   212  func TestClient_Fingerprint_InWhitelist(t *testing.T) {
   213  	c := testClient(t, func(c *config.Config) {
   214  		if c.Options == nil {
   215  			c.Options = make(map[string]string)
   216  		}
   217  
   218  		// Weird spacing to test trimming. Whitelist all modules, including cpu.
   219  		c.Options["fingerprint.whitelist"] = "  arch, consul,cpu,env_aws,env_gce,host,memory,network,storage,foo,bar	"
   220  	})
   221  	defer c.Shutdown()
   222  
   223  	node := c.Node()
   224  	if node.Attributes["cpu.frequency"] == "" {
   225  		t.Fatalf("missing cpu fingerprint module")
   226  	}
   227  }
   228  
   229  func TestClient_Fingerprint_InBlacklist(t *testing.T) {
   230  	c := testClient(t, func(c *config.Config) {
   231  		if c.Options == nil {
   232  			c.Options = make(map[string]string)
   233  		}
   234  
   235  		// Weird spacing to test trimming. Blacklist cpu.
   236  		c.Options["fingerprint.blacklist"] = "  cpu	"
   237  	})
   238  	defer c.Shutdown()
   239  
   240  	node := c.Node()
   241  	if node.Attributes["cpu.frequency"] != "" {
   242  		t.Fatalf("cpu fingerprint module loaded despite blacklisting")
   243  	}
   244  }
   245  
   246  func TestClient_Fingerprint_OutOfWhitelist(t *testing.T) {
   247  	c := testClient(t, func(c *config.Config) {
   248  		if c.Options == nil {
   249  			c.Options = make(map[string]string)
   250  		}
   251  
   252  		c.Options["fingerprint.whitelist"] = "arch,consul,env_aws,env_gce,host,memory,network,storage,foo,bar"
   253  	})
   254  	defer c.Shutdown()
   255  
   256  	node := c.Node()
   257  	if node.Attributes["cpu.frequency"] != "" {
   258  		t.Fatalf("found cpu fingerprint module")
   259  	}
   260  }
   261  
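        // TestClient_Fingerprint_WhitelistBlacklistCombination verifies that when
        // both lists are set, only modules that are whitelisted and not blacklisted
        // are fingerprinted.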
   262  func TestClient_Fingerprint_WhitelistBlacklistCombination(t *testing.T) {
   263  	c := testClient(t, func(c *config.Config) {
   264  		if c.Options == nil {
   265  			c.Options = make(map[string]string)
   266  		}
   267  
   268  		// With both a whitelist and a blacklist, only the set difference should be fingerprinted (arch, cpu)
   269  		c.Options["fingerprint.whitelist"] = "arch,memory,cpu"
   270  		c.Options["fingerprint.blacklist"] = "memory,nomad"
   271  	})
   272  	defer c.Shutdown()
   273  
   274  	node := c.Node()
   275  	// Check expected modules are present
   276  	if node.Attributes["cpu.frequency"] == "" {
   277  		t.Fatalf("missing cpu fingerprint module")
   278  	}
   279  	if node.Attributes["cpu.arch"] == "" {
   280  		t.Fatalf("missing arch fingerprint module")
   281  	}
   282  	// Check remainder _not_ present
   283  	if node.Attributes["memory.totalbytes"] != "" {
   284  		t.Fatalf("found memory fingerprint module")
   285  	}
   286  	if node.Attributes["nomad.version"] != "" {
   287  		t.Fatalf("found nomad fingerprint module")
   288  	}
   289  }
   290  
   291  func TestClient_Drivers(t *testing.T) {
   292  	c := testClient(t, nil)
   293  	defer c.Shutdown()
   294  
   295  	node := c.Node()
   296  	if node.Attributes["driver.exec"] == "" {
   297  		if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
   298  			t.Fatalf("missing exec driver")
   299  		} else {
   300  			t.Skipf("missing exec driver, no OS support")
   301  		}
   302  	}
   303  }
   304  
   305  func TestClient_Drivers_InWhitelist(t *testing.T) {
   306  	c := testClient(t, func(c *config.Config) {
   307  		if c.Options == nil {
   308  			c.Options = make(map[string]string)
   309  		}
   310  
   311  		// Weird spacing to test trimming
   312  		c.Options["driver.whitelist"] = "   exec ,  foo	"
   313  	})
   314  	defer c.Shutdown()
   315  
   316  	node := c.Node()
   317  	if node.Attributes["driver.exec"] == "" {
   318  		if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
   319  			t.Fatalf("missing exec driver")
   320  		} else {
   321  			t.Skipf("missing exec driver, no OS support")
   322  		}
   323  	}
   324  }
   325  
   326  func TestClient_Drivers_InBlacklist(t *testing.T) {
   327  	c := testClient(t, func(c *config.Config) {
   328  		if c.Options == nil {
   329  			c.Options = make(map[string]string)
   330  		}
   331  
   332  		// Weird spacing to test trimming
   333  		c.Options["driver.blacklist"] = "   exec ,  foo	"
   334  	})
   335  	defer c.Shutdown()
   336  
   337  	node := c.Node()
   338  	if node.Attributes["driver.exec"] != "" {
   339  		if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
   340  			t.Fatalf("exec driver loaded despite blacklist")
   341  		} else {
   342  			t.Skipf("exec driver support unknown for this OS")
   343  		}
   344  	}
   345  }
   346  
   347  func TestClient_Drivers_OutOfWhitelist(t *testing.T) {
   348  	c := testClient(t, func(c *config.Config) {
   349  		if c.Options == nil {
   350  			c.Options = make(map[string]string)
   351  		}
   352  
   353  		c.Options["driver.whitelist"] = "foo,bar,baz"
   354  	})
   355  	defer c.Shutdown()
   356  
   357  	node := c.Node()
   358  	if node.Attributes["driver.exec"] != "" {
   359  		t.Fatalf("found exec driver")
   360  	}
   361  }
   362  
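        // TestClient_Drivers_WhitelistBlacklistCombination verifies that a driver
        // appearing on both lists stays disabled, leaving only the whitelisted,
        // non-blacklisted drivers.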
   363  func TestClient_Drivers_WhitelistBlacklistCombination(t *testing.T) {
   364  	c := testClient(t, func(c *config.Config) {
   365  		if c.Options == nil {
   366  			c.Options = make(map[string]string)
   367  		}
   368  
   369  		// Expected output is set difference (raw_exec)
   370  		c.Options["driver.whitelist"] = "raw_exec,exec"
   371  		c.Options["driver.blacklist"] = "exec"
   372  	})
   373  	defer c.Shutdown()
   374  
   375  	node := c.Node()
   376  	// Check expected present
   377  	if node.Attributes["driver.raw_exec"] == "" {
   378  		t.Fatalf("missing raw_exec driver")
   379  	}
   380  	// Check expected absent
   381  	if node.Attributes["driver.exec"] != "" {
   382  		t.Fatalf("exec driver loaded despite blacklist")
   383  	}
   384  }
   385  
   386  // TestClient_MixedTLS asserts that when a server is running with TLS enabled
   387  // it will reject any RPC connections from clients that lack TLS. See #2525
   388  func TestClient_MixedTLS(t *testing.T) {
   389  	const (
   390  		cafile  = "../helper/tlsutil/testdata/ca.pem"
   391  		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
   392  		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
   393  	)
   394  	s1, addr := testServer(t, func(c *nomad.Config) {
   395  		c.TLSConfig = &nconfig.TLSConfig{
   396  			EnableHTTP:           true,
   397  			EnableRPC:            true,
   398  			VerifyServerHostname: true,
   399  			CAFile:               cafile,
   400  			CertFile:             foocert,
   401  			KeyFile:              fookey,
   402  		}
   403  	})
   404  	defer s1.Shutdown()
   405  	testutil.WaitForLeader(t, s1.RPC)
   406  
   407  	c1 := testClient(t, func(c *config.Config) {
   408  		c.Servers = []string{addr}
   409  	})
   410  	defer c1.Shutdown()
   411  
   412  	req := structs.NodeSpecificRequest{
   413  		NodeID:       c1.Node().ID,
   414  		QueryOptions: structs.QueryOptions{Region: "global"},
   415  	}
   416  	var out structs.SingleNodeResponse
   417  	testutil.AssertUntil(100*time.Millisecond,
   418  		func() (bool, error) {
   419  			err := c1.RPC("Node.GetNode", &req, &out)
   420  			if err == nil {
   421  				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
   422  			}
   423  			return true, nil
   424  		},
   425  		func(err error) {
   426  			t.Fatalf("%v", err)
   427  		},
   428  	)
   429  }
   430  
   431  // TestClient_BadTLS asserts that when a client and server are running with TLS
   432  // enabled -- but their certificates are signed by different CAs -- they're
   433  // unable to communicate.
   434  func TestClient_BadTLS(t *testing.T) {
   435  	const (
   436  		cafile  = "../helper/tlsutil/testdata/ca.pem"
   437  		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
   438  		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
   439  		badca   = "../helper/tlsutil/testdata/ca-bad.pem"
   440  		badcert = "../helper/tlsutil/testdata/nomad-bad.pem"
   441  		badkey  = "../helper/tlsutil/testdata/nomad-bad-key.pem"
   442  	)
   443  	s1, addr := testServer(t, func(c *nomad.Config) {
   444  		c.TLSConfig = &nconfig.TLSConfig{
   445  			EnableHTTP:           true,
   446  			EnableRPC:            true,
   447  			VerifyServerHostname: true,
   448  			CAFile:               cafile,
   449  			CertFile:             foocert,
   450  			KeyFile:              fookey,
   451  		}
   452  	})
   453  	defer s1.Shutdown()
   454  	testutil.WaitForLeader(t, s1.RPC)
   455  
   456  	c1 := testClient(t, func(c *config.Config) {
   457  		c.Servers = []string{addr}
   458  		c.TLSConfig = &nconfig.TLSConfig{
   459  			EnableHTTP:           true,
   460  			EnableRPC:            true,
   461  			VerifyServerHostname: true,
   462  			CAFile:               badca,
   463  			CertFile:             badcert,
   464  			KeyFile:              badkey,
   465  		}
   466  	})
   467  	defer c1.Shutdown()
   468  
   469  	req := structs.NodeSpecificRequest{
   470  		NodeID:       c1.Node().ID,
   471  		QueryOptions: structs.QueryOptions{Region: "global"},
   472  	}
   473  	var out structs.SingleNodeResponse
   474  	testutil.AssertUntil(100*time.Millisecond,
   475  		func() (bool, error) {
   476  			err := c1.RPC("Node.GetNode", &req, &out)
   477  			if err == nil {
   478  				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
   479  			}
   480  			return true, nil
   481  		},
   482  		func(err error) {
   483  			t.Fatalf("%v", err)
   484  		},
   485  	)
   486  }
   487  
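        // TestClient_Register verifies that a client registers itself with the
        // server and can be looked up via Node.GetNode.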
   488  func TestClient_Register(t *testing.T) {
   489  	s1, _ := testServer(t, nil)
   490  	defer s1.Shutdown()
   491  	testutil.WaitForLeader(t, s1.RPC)
   492  
   493  	c1 := testClient(t, func(c *config.Config) {
   494  		c.RPCHandler = s1
   495  	})
   496  	defer c1.Shutdown()
   497  
   498  	req := structs.NodeSpecificRequest{
   499  		NodeID:       c1.Node().ID,
   500  		QueryOptions: structs.QueryOptions{Region: "global"},
   501  	}
   502  	var out structs.SingleNodeResponse
   503  
   504  	// Register should succeed
   505  	testutil.WaitForResult(func() (bool, error) {
   506  		err := s1.RPC("Node.GetNode", &req, &out)
   507  		if err != nil {
   508  			return false, err
   509  		}
   510  		if out.Node == nil {
   511  			return false, fmt.Errorf("missing reg")
   512  		}
   513  		return out.Node.ID == req.NodeID, nil
   514  	}, func(err error) {
   515  		t.Fatalf("err: %v", err)
   516  	})
   517  }
   518  
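        // TestClient_Heartbeat verifies that a registered client heartbeats and
        // reaches the ready status on the server.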
   519  func TestClient_Heartbeat(t *testing.T) {
   520  	s1, _ := testServer(t, func(c *nomad.Config) {
   521  		c.MinHeartbeatTTL = 50 * time.Millisecond
   522  	})
   523  	defer s1.Shutdown()
   524  	testutil.WaitForLeader(t, s1.RPC)
   525  
   526  	c1 := testClient(t, func(c *config.Config) {
   527  		c.RPCHandler = s1
   528  	})
   529  	defer c1.Shutdown()
   530  
   531  	req := structs.NodeSpecificRequest{
   532  		NodeID:       c1.Node().ID,
   533  		QueryOptions: structs.QueryOptions{Region: "global"},
   534  	}
   535  	var out structs.SingleNodeResponse
   536  
   537  	// Register should succeed
   538  	testutil.WaitForResult(func() (bool, error) {
   539  		err := s1.RPC("Node.GetNode", &req, &out)
   540  		if err != nil {
   541  			return false, err
   542  		}
   543  		if out.Node == nil {
   544  			return false, fmt.Errorf("missing reg")
   545  		}
   546  		return out.Node.Status == structs.NodeStatusReady, nil
   547  	}, func(err error) {
   548  		t.Fatalf("err: %v", err)
   549  	})
   550  }
   551  
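        // TestClient_UpdateAllocStatus verifies that the client replaces a placed
        // allocation's bogus client status once it syncs with the server.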
   552  func TestClient_UpdateAllocStatus(t *testing.T) {
   553  	s1, _ := testServer(t, nil)
   554  	defer s1.Shutdown()
   555  	testutil.WaitForLeader(t, s1.RPC)
   556  
   557  	c1 := testClient(t, func(c *config.Config) {
   558  		c.RPCHandler = s1
   559  	})
   560  	defer c1.Shutdown()
   561  
   562  	// Wait until the node is ready
   563  	waitTilNodeReady(c1, t)
   564  
   565  	job := mock.Job()
   566  	alloc := mock.Alloc()
   567  	alloc.NodeID = c1.Node().ID
   568  	alloc.Job = job
   569  	alloc.JobID = job.ID
   570  	originalStatus := "foo"
   571  	alloc.ClientStatus = originalStatus
   572  
   573  	// Insert at zero so they are pulled
   574  	state := s1.State()
   575  	if err := state.UpsertJob(0, job); err != nil {
   576  		t.Fatal(err)
   577  	}
   578  	if err := state.UpsertJobSummary(100, mock.JobSummary(alloc.JobID)); err != nil {
   579  		t.Fatal(err)
   580  	}
   581  	state.UpsertAllocs(101, []*structs.Allocation{alloc})
   582  
   583  	testutil.WaitForResult(func() (bool, error) {
   584  		ws := memdb.NewWatchSet()
   585  		out, err := state.AllocByID(ws, alloc.ID)
   586  		if err != nil {
   587  			return false, err
   588  		}
   589  		if out == nil {
   590  			return false, fmt.Errorf("no such alloc")
   591  		}
   592  		if out.ClientStatus == originalStatus {
   593  			return false, fmt.Errorf("Alloc client status not updated; got %v", out.ClientStatus)
   594  		}
   595  		return true, nil
   596  	}, func(err error) {
   597  		t.Fatalf("err: %v", err)
   598  	})
   599  }
   600  
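        // TestClient_WatchAllocs verifies that the client picks up newly placed
        // allocations, drops removed ones, and applies updates to existing ones.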
   601  func TestClient_WatchAllocs(t *testing.T) {
   602  	ctestutil.ExecCompatible(t)
   603  	s1, _ := testServer(t, nil)
   604  	defer s1.Shutdown()
   605  	testutil.WaitForLeader(t, s1.RPC)
   606  
   607  	c1 := testClient(t, func(c *config.Config) {
   608  		c.RPCHandler = s1
   609  	})
   610  	defer c1.Shutdown()
   611  
   612  	// Wait until the node is ready
   613  	waitTilNodeReady(c1, t)
   614  
   615  	// Create mock allocations
   616  	job := mock.Job()
   617  	alloc1 := mock.Alloc()
   618  	alloc1.JobID = job.ID
   619  	alloc1.Job = job
   620  	alloc1.NodeID = c1.Node().ID
   621  	alloc2 := mock.Alloc()
   622  	alloc2.NodeID = c1.Node().ID
   623  	alloc2.JobID = job.ID
   624  	alloc2.Job = job
   625  
   626  	// Insert the job and allocations so they are pulled by the client
   627  	state := s1.State()
   628  	if err := state.UpsertJob(100, job); err != nil {
   629  		t.Fatal(err)
   630  	}
   631  	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
   632  		t.Fatal(err)
   633  	}
   634  	err := state.UpsertAllocs(102, []*structs.Allocation{alloc1, alloc2})
   635  	if err != nil {
   636  		t.Fatalf("err: %v", err)
   637  	}
   638  
   639  	// Both allocations should get registered
   640  	testutil.WaitForResult(func() (bool, error) {
   641  		c1.allocLock.RLock()
   642  		num := len(c1.allocs)
   643  		c1.allocLock.RUnlock()
   644  		return num == 2, nil
   645  	}, func(err error) {
   646  		t.Fatalf("err: %v", err)
   647  	})
   648  
   649  	// Delete one allocation
   650  	err = state.DeleteEval(103, nil, []string{alloc1.ID})
   651  	if err != nil {
   652  		t.Fatalf("err: %v", err)
   653  	}
   654  
   655  	// Update the other allocation. Have to make a copy because the allocs are
   656  	// shared in memory in the test and the modify index would be updated in the
   657  	// alloc runner.
   658  	alloc2_2 := new(structs.Allocation)
   659  	*alloc2_2 = *alloc2
   660  	alloc2_2.DesiredStatus = structs.AllocDesiredStatusStop
   661  	err = state.UpsertAllocs(104, []*structs.Allocation{alloc2_2})
   662  	if err != nil {
   663  		t.Fatalf("err: %v", err)
   664  	}
   665  
   666  	// One allocation should get de-registered
   667  	testutil.WaitForResult(func() (bool, error) {
   668  		c1.allocLock.RLock()
   669  		num := len(c1.allocs)
   670  		c1.allocLock.RUnlock()
   671  		return num == 1, nil
   672  	}, func(err error) {
   673  		t.Fatalf("err: %v", err)
   674  	})
   675  
   676  	// The other allocation should get updated
   677  	testutil.WaitForResult(func() (bool, error) {
   678  		c1.allocLock.RLock()
   679  		ar := c1.allocs[alloc2.ID]
   680  		c1.allocLock.RUnlock()
   681  		return ar.Alloc().DesiredStatus == structs.AllocDesiredStatusStop, nil
   682  	}, func(err error) {
   683  		t.Fatalf("err: %v", err)
   684  	})
   685  }
   686  
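        // waitTilNodeReady blocks until the client's node reaches the ready status,
        // failing the test on timeout.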
   687  func waitTilNodeReady(client *Client, t *testing.T) {
   688  	testutil.WaitForResult(func() (bool, error) {
   689  		n := client.Node()
   690  		if n.Status != structs.NodeStatusReady {
   691  			return false, fmt.Errorf("node not registered")
   692  		}
   693  		return true, nil
   694  	}, func(err error) {
   695  		t.Fatalf("err: %v", err)
   696  	})
   697  }
   698  
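        // TestClient_SaveRestoreState verifies that a non-dev-mode client persists
        // allocation state on shutdown and that a new client created from the same
        // config restores the running allocation.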
   699  func TestClient_SaveRestoreState(t *testing.T) {
   700  	ctestutil.ExecCompatible(t)
   701  	s1, _ := testServer(t, nil)
   702  	defer s1.Shutdown()
   703  	testutil.WaitForLeader(t, s1.RPC)
   704  
   705  	c1 := testClient(t, func(c *config.Config) {
   706  		c.DevMode = false
   707  		c.RPCHandler = s1
   708  	})
   709  	defer c1.Shutdown()
   710  
   711  	// Wait until the node is ready
   712  	waitTilNodeReady(c1, t)
   713  
   714  	// Create mock allocations
   715  	job := mock.Job()
   716  	alloc1 := mock.Alloc()
   717  	alloc1.NodeID = c1.Node().ID
   718  	alloc1.Job = job
   719  	alloc1.JobID = job.ID
   720  	alloc1.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
   721  	task := alloc1.Job.TaskGroups[0].Tasks[0]
   722  	task.Config["run_for"] = "10s"
   723  
   724  	state := s1.State()
   725  	if err := state.UpsertJob(100, job); err != nil {
   726  		t.Fatal(err)
   727  	}
   728  	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
   729  		t.Fatal(err)
   730  	}
   731  	if err := state.UpsertAllocs(102, []*structs.Allocation{alloc1}); err != nil {
   732  		t.Fatalf("err: %v", err)
   733  	}
   734  
   735  	// The allocation should get registered and reach the running state
   736  	testutil.WaitForResult(func() (bool, error) {
   737  		c1.allocLock.RLock()
   738  		ar := c1.allocs[alloc1.ID]
   739  		c1.allocLock.RUnlock()
   740  		if ar == nil {
   741  			return false, fmt.Errorf("nil alloc runner")
   742  		}
   743  		if ar.Alloc().ClientStatus != structs.AllocClientStatusRunning {
   744  			return false, fmt.Errorf("client status: got %v; want %v", ar.Alloc().ClientStatus, structs.AllocClientStatusRunning)
   745  		}
   746  		return true, nil
   747  	}, func(err error) {
   748  		t.Fatalf("err: %v", err)
   749  	})
   750  
   751  	// Shut down the client, which saves its state
   752  	if err := c1.Shutdown(); err != nil {
   753  		t.Fatalf("err: %v", err)
   754  	}
   755  
   756  	// Create a new client
   757  	shutdownCh := make(chan struct{})
   758  	logger := log.New(c1.config.LogOutput, "", log.LstdFlags)
   759  	consulSyncer, err := consul.NewSyncer(c1.config.ConsulConfig, shutdownCh, logger)
   760  	if err != nil {
   761  		t.Fatalf("err: %v", err)
   762  	}
   763  
   764  	c2, err := NewClient(c1.config, consulSyncer, logger)
   765  	if err != nil {
   766  		t.Fatalf("err: %v", err)
   767  	}
   768  	defer c2.Shutdown()
   769  
   770  	// Ensure the allocation is running
   771  	testutil.WaitForResult(func() (bool, error) {
   772  		c2.allocLock.RLock()
   773  		ar := c2.allocs[alloc1.ID]
   774  		c2.allocLock.RUnlock()
   775  		status := ar.Alloc().ClientStatus
   776  		alive := status == structs.AllocClientStatusRunning || status == structs.AllocClientStatusPending
   777  		if !alive {
   778  			return false, fmt.Errorf("incorrect client status: %#v", ar.Alloc())
   779  		}
   780  		return true, nil
   781  	}, func(err error) {
   782  		t.Fatalf("err: %v", err)
   783  	})
   784  
   785  	// Destroy all the allocations
   786  	for _, ar := range c2.getAllocRunners() {
   787  		ar.Destroy()
   788  	}
   789  
   790  	for _, ar := range c2.getAllocRunners() {
   791  		<-ar.WaitCh()
   792  	}
   793  }
   794  
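        // TestClient_Init verifies that client.init creates the configured alloc
        // directory.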
   795  func TestClient_Init(t *testing.T) {
   796  	dir, err := ioutil.TempDir("", "nomad")
   797  	if err != nil {
   798  		t.Fatalf("err: %s", err)
   799  	}
   800  	defer os.RemoveAll(dir)
   801  	allocDir := filepath.Join(dir, "alloc")
   802  
   803  	client := &Client{
   804  		config: &config.Config{
   805  			AllocDir: allocDir,
   806  		},
   807  		logger: log.New(os.Stderr, "", log.LstdFlags),
   808  	}
   809  	if err := client.init(); err != nil {
   810  		t.Fatalf("err: %s", err)
   811  	}
   812  
   813  	if _, err := os.Stat(allocDir); err != nil {
   814  		t.Fatalf("err: %s", err)
   815  	}
   816  }
   817  
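        // TestClient_BlockedAllocations verifies that an allocation whose previous
        // allocation is still running is tracked as blocked until that previous
        // allocation is stopped.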
   818  func TestClient_BlockedAllocations(t *testing.T) {
   819  	s1, _ := testServer(t, nil)
   820  	defer s1.Shutdown()
   821  	testutil.WaitForLeader(t, s1.RPC)
   822  
   823  	c1 := testClient(t, func(c *config.Config) {
   824  		c.RPCHandler = s1
   825  	})
   826  	defer c1.Shutdown()
   827  
   828  	// Wait for the node to be ready
   829  	state := s1.State()
   830  	testutil.WaitForResult(func() (bool, error) {
   831  		ws := memdb.NewWatchSet()
   832  		out, err := state.NodeByID(ws, c1.Node().ID)
   833  		if err != nil {
   834  			return false, err
   835  		}
   836  		if out == nil || out.Status != structs.NodeStatusReady {
   837  			return false, fmt.Errorf("bad node: %#v", out)
   838  		}
   839  		return true, nil
   840  	}, func(err error) {
   841  		t.Fatalf("err: %v", err)
   842  	})
   843  
   844  	// Add an allocation
   845  	alloc := mock.Alloc()
   846  	alloc.NodeID = c1.Node().ID
   847  	alloc.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
   848  	alloc.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   849  		"kill_after":  "1s",
   850  		"run_for":     "100s",
   851  		"exit_code":   0,
   852  		"exit_signal": 0,
   853  		"exit_err":    "",
   854  	}
   855  
   856  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
   857  	state.UpsertAllocs(100, []*structs.Allocation{alloc})
   858  
   859  	// Wait until the client downloads and starts the allocation
   860  	testutil.WaitForResult(func() (bool, error) {
   861  		ws := memdb.NewWatchSet()
   862  		out, err := state.AllocByID(ws, alloc.ID)
   863  		if err != nil {
   864  			return false, err
   865  		}
   866  		if out == nil || out.ClientStatus != structs.AllocClientStatusRunning {
   867  			return false, fmt.Errorf("bad alloc: %#v", out)
   868  		}
   869  		return true, nil
   870  	}, func(err error) {
   871  		t.Fatalf("err: %v", err)
   872  	})
   873  
   874  	// Add a new chained alloc
   875  	alloc2 := alloc.Copy()
   876  	alloc2.ID = structs.GenerateUUID()
   877  	alloc2.Job = alloc.Job
   878  	alloc2.JobID = alloc.JobID
   879  	alloc2.PreviousAllocation = alloc.ID
   880  	if err := state.UpsertAllocs(200, []*structs.Allocation{alloc2}); err != nil {
   881  		t.Fatalf("err: %v", err)
   882  	}
   883  
   884  	// Ensure that the chained allocation is being tracked as blocked
   885  	testutil.WaitForResult(func() (bool, error) {
   886  		alloc, ok := c1.blockedAllocations[alloc2.PreviousAllocation]
   887  		if ok && alloc.ID == alloc2.ID {
   888  			return true, nil
   889  		}
   890  		return false, fmt.Errorf("no blocked allocations")
   891  	}, func(err error) {
   892  		t.Fatalf("err: %v", err)
   893  	})
   894  
   895  	// Change the desired state of the parent alloc to stop
   896  	alloc1 := alloc.Copy()
   897  	alloc1.DesiredStatus = structs.AllocDesiredStatusStop
   898  	if err := state.UpsertAllocs(300, []*structs.Allocation{alloc1}); err != nil {
   899  		t.Fatalf("err: %v", err)
   900  	}
   901  
   902  	// Ensure that there are no blocked allocations
   903  	testutil.WaitForResult(func() (bool, error) {
   904  		_, ok := c1.blockedAllocations[alloc2.PreviousAllocation]
   905  		if ok {
   906  			return false, fmt.Errorf("blocked allocations present")
   907  		}
   908  		return true, nil
   909  	}, func(err error) {
   910  		t.Fatalf("err: %v", err)
   911  	})
   912  
   913  	// Destroy all the allocations
   914  	for _, ar := range c1.getAllocRunners() {
   915  		ar.Destroy()
   916  	}
   917  
   918  	for _, ar := range c1.getAllocRunners() {
   919  		<-ar.WaitCh()
   920  	}
   921  }
   922  
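        // TestClient_UnarchiveAllocDir verifies that unarchiveAllocDir restores
        // files and directories from a tar stream with their original modes.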
   923  func TestClient_UnarchiveAllocDir(t *testing.T) {
   924  	dir, err := ioutil.TempDir("", "")
   925  	if err != nil {
   926  		t.Fatalf("err: %v", err)
   927  	}
   928  	defer os.RemoveAll(dir)
   929  
   930  	if err := os.Mkdir(filepath.Join(dir, "foo"), 0777); err != nil {
   931  		t.Fatalf("err: %v", err)
   932  	}
   933  	dirInfo, err := os.Stat(filepath.Join(dir, "foo"))
   934  	if err != nil {
   935  		t.Fatalf("err: %v", err)
   936  	}
   937  	f, err := os.Create(filepath.Join(dir, "foo", "bar"))
   938  	if err != nil {
   939  		t.Fatalf("err: %v", err)
   940  	}
   941  	if _, err := f.WriteString("foo"); err != nil {
   942  		t.Fatalf("err: %v", err)
   943  	}
   944  	if err := f.Chmod(0644); err != nil {
   945  		t.Fatalf("err: %v", err)
   946  	}
   947  	fInfo, err := f.Stat()
   948  	if err != nil {
   949  		t.Fatalf("err: %v", err)
   950  	}
   951  	f.Close()
   952  
   953  	buf := new(bytes.Buffer)
   954  	tw := tar.NewWriter(buf)
   955  
   956  	walkFn := func(path string, fileInfo os.FileInfo, err error) error {
   957  		// Ignore if the file is a symlink
   958  		if fileInfo.Mode()&os.ModeSymlink != 0 {
   959  			return nil
   960  		}
   961  
   962  		// Name the header after the file's base name; the test later checks
   963  		// for these entries directly under the destination dir
   964  		hdr, err := tar.FileInfoHeader(fileInfo, "")
   965  		if err != nil {
   966  			return fmt.Errorf("error creating file header: %v", err)
   967  		}
   968  		hdr.Name = fileInfo.Name()
   969  		tw.WriteHeader(hdr)
   970  
   971  		// If it's a directory we just write the header into the tar
   972  		if fileInfo.IsDir() {
   973  			return nil
   974  		}
   975  
   976  		// Write the file into the archive
   977  		file, err := os.Open(path)
   978  		if err != nil {
   979  			return err
   980  		}
   981  		defer file.Close()
   982  
   983  		if _, err := io.Copy(tw, file); err != nil {
   984  			return err
   985  		}
   986  
   987  		return nil
   988  	}
   989  
   990  	if err := filepath.Walk(dir, walkFn); err != nil {
   991  		t.Fatalf("err: %v", err)
   992  	}
   993  	tw.Close()
   994  
   995  	dir1, err := ioutil.TempDir("", "")
   996  	if err != nil {
   997  		t.Fatalf("err: %v", err)
   998  	}
   999  	defer os.RemoveAll(dir1)
  1000  
  1001  	c1 := testClient(t, func(c *config.Config) {
  1002  		c.RPCHandler = nil
  1003  	})
  1004  	defer c1.Shutdown()
  1005  
  1006  	rc := ioutil.NopCloser(buf)
  1007  
  1008  	c1.migratingAllocs["123"] = newMigrateAllocCtrl(mock.Alloc())
  1009  	if err := c1.unarchiveAllocDir(rc, "123", dir1); err != nil {
  1010  		t.Fatalf("err: %v", err)
  1011  	}
  1012  
  1013  	// Ensure foo is present
  1014  	fi, err := os.Stat(filepath.Join(dir1, "foo"))
  1015  	if err != nil {
  1016  		t.Fatalf("err: %v", err)
  1017  	}
  1018  	if fi.Mode() != dirInfo.Mode() {
  1019  		t.Fatalf("mode: %v", fi.Mode())
  1020  	}
  1021  
  1022  	fi1, err := os.Stat(filepath.Join(dir1, "bar"))
  1023  	if err != nil {
  1024  		t.Fatalf("err: %v", err)
  1025  	}
  1026  	if fi1.Mode() != fInfo.Mode() {
  1027  		t.Fatalf("mode: %v", fi1.Mode())
  1028  	}
  1029  }