github.com/manicqin/nomad@v0.9.5/command/node_drain_test.go (about)

     1  package command
     2  
     3  import (
     4  	"bytes"
     5  	"fmt"
     6  	"strings"
     7  	"testing"
     8  	"time"
     9  
    10  	"github.com/hashicorp/nomad/api"
    11  	"github.com/hashicorp/nomad/command/agent"
    12  	"github.com/hashicorp/nomad/helper"
    13  	"github.com/hashicorp/nomad/testutil"
    14  	"github.com/mitchellh/cli"
    15  	"github.com/posener/complete"
    16  	"github.com/stretchr/testify/assert"
    17  	"github.com/stretchr/testify/require"
    18  )
    19  
    20  func TestNodeDrainCommand_Implements(t *testing.T) {
    21  	t.Parallel()
    22  	var _ cli.Command = &NodeDrainCommand{}
    23  }
    24  
    25  func TestNodeDrainCommand_Detach(t *testing.T) {
    26  	t.Parallel()
    27  	require := require.New(t)
    28  	server, client, url := testServer(t, true, func(c *agent.Config) {
    29  		c.NodeName = "drain_detach_node"
    30  	})
    31  	defer server.Shutdown()
    32  
    33  	// Wait for a node to appear
    34  	var nodeID string
    35  	testutil.WaitForResult(func() (bool, error) {
    36  		nodes, _, err := client.Nodes().List(nil)
    37  		if err != nil {
    38  			return false, err
    39  		}
    40  		if len(nodes) == 0 {
    41  			return false, fmt.Errorf("missing node")
    42  		}
    43  		nodeID = nodes[0].ID
    44  		return true, nil
    45  	}, func(err error) {
    46  		t.Fatalf("err: %s", err)
    47  	})
    48  
    49  	// Register a job to create an alloc to drain that will block draining
    50  	job := &api.Job{
    51  		ID:          helper.StringToPtr("mock_service"),
    52  		Name:        helper.StringToPtr("mock_service"),
    53  		Datacenters: []string{"dc1"},
    54  		TaskGroups: []*api.TaskGroup{
    55  			{
    56  				Name: helper.StringToPtr("mock_group"),
    57  				Tasks: []*api.Task{
    58  					{
    59  						Name:   "mock_task",
    60  						Driver: "mock_driver",
    61  						Config: map[string]interface{}{
    62  							"run_for": "10m",
    63  						},
    64  					},
    65  				},
    66  			},
    67  		},
    68  	}
    69  
    70  	_, _, err := client.Jobs().Register(job, nil)
    71  	require.Nil(err)
    72  
    73  	testutil.WaitForResult(func() (bool, error) {
    74  		allocs, _, err := client.Nodes().Allocations(nodeID, nil)
    75  		if err != nil {
    76  			return false, err
    77  		}
    78  		return len(allocs) > 0, fmt.Errorf("no allocs")
    79  	}, func(err error) {
    80  		t.Fatalf("err: %v", err)
    81  	})
    82  
    83  	ui := new(cli.MockUi)
    84  	cmd := &NodeDrainCommand{Meta: Meta{Ui: ui}}
    85  	if code := cmd.Run([]string{"-address=" + url, "-self", "-enable", "-detach"}); code != 0 {
    86  		t.Fatalf("expected exit 0, got: %d", code)
    87  	}
    88  
    89  	out := ui.OutputWriter.String()
    90  	expected := "drain strategy set"
    91  	require.Contains(out, expected)
    92  
    93  	node, _, err := client.Nodes().Info(nodeID, nil)
    94  	require.Nil(err)
    95  	require.NotNil(node.DrainStrategy)
    96  }
    97  
// TestNodeDrainCommand_Monitor runs an end-to-end drain and checks the
// command's monitoring output: service allocs are reported as marked for
// migration and draining, system-job allocs are excluded from the output
// (-ignore-system), and re-running with -monitor on the already-drained node
// reports that no drain strategy is set.
func TestNodeDrainCommand_Monitor(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	server, client, url := testServer(t, true, func(c *agent.Config) {
		c.NodeName = "drain_monitor_node"
	})
	defer server.Shutdown()

	// Wait for a node to appear
	var nodeID string
	testutil.WaitForResult(func() (bool, error) {
		nodes, _, err := client.Nodes().List(nil)
		if err != nil {
			return false, err
		}
		if len(nodes) == 0 {
			return false, fmt.Errorf("missing node")
		}
		// Also wait for the mock driver to be fingerprinted so the jobs
		// registered below can actually be placed and run.
		if _, ok := nodes[0].Drivers["mock_driver"]; !ok {
			return false, fmt.Errorf("mock_driver not ready")
		}
		nodeID = nodes[0].ID
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// Register a service job to create allocs to drain
	serviceCount := 3
	job := &api.Job{
		ID:          helper.StringToPtr("mock_service"),
		Name:        helper.StringToPtr("mock_service"),
		Datacenters: []string{"dc1"},
		Type:        helper.StringToPtr("service"),
		TaskGroups: []*api.TaskGroup{
			{
				Name:  helper.StringToPtr("mock_group"),
				Count: &serviceCount,
				// Migrate one alloc at a time so the monitor has a chance
				// to observe each migration before the drain finishes.
				Migrate: &api.MigrateStrategy{
					MaxParallel:     helper.IntToPtr(1),
					HealthCheck:     helper.StringToPtr("task_states"),
					MinHealthyTime:  helper.TimeToPtr(10 * time.Millisecond),
					HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
				},
				Tasks: []*api.Task{
					{
						Name:   "mock_task",
						Driver: "mock_driver",
						Config: map[string]interface{}{
							"run_for": "10m",
						},
						Resources: &api.Resources{
							CPU:      helper.IntToPtr(50),
							MemoryMB: helper.IntToPtr(50),
						},
					},
				},
			},
		},
	}

	_, _, err := client.Jobs().Register(job, nil)
	require.Nil(err)

	// Register a system job to ensure it is ignored during draining
	sysjob := &api.Job{
		ID:          helper.StringToPtr("mock_system"),
		Name:        helper.StringToPtr("mock_system"),
		Datacenters: []string{"dc1"},
		Type:        helper.StringToPtr("system"),
		TaskGroups: []*api.TaskGroup{
			{
				Name:  helper.StringToPtr("mock_sysgroup"),
				Count: helper.IntToPtr(1),
				Tasks: []*api.Task{
					{
						Name:   "mock_systask",
						Driver: "mock_driver",
						Config: map[string]interface{}{
							"run_for": "10m",
						},
						Resources: &api.Resources{
							CPU:      helper.IntToPtr(50),
							MemoryMB: helper.IntToPtr(50),
						},
					},
				},
			},
		},
	}

	_, _, err = client.Jobs().Register(sysjob, nil)
	require.Nil(err)

	// Wait for all service allocs (serviceCount) plus the one system alloc
	// to be running on the node before starting the drain.
	var allocs []*api.Allocation
	testutil.WaitForResult(func() (bool, error) {
		allocs, _, err = client.Nodes().Allocations(nodeID, nil)
		if err != nil {
			return false, err
		}
		if len(allocs) != serviceCount+1 {
			return false, fmt.Errorf("number of allocs %d != count (%d)", len(allocs), serviceCount+1)
		}
		for _, a := range allocs {
			if a.ClientStatus != "running" {
				return false, fmt.Errorf("alloc %q still not running: %s", a.ID, a.ClientStatus)
			}
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Use a BasicUi writing to one buffer so stdout and stderr output can be
	// inspected together.
	outBuf := bytes.NewBuffer(nil)
	ui := &cli.BasicUi{
		Reader:      bytes.NewReader(nil),
		Writer:      outBuf,
		ErrorWriter: outBuf,
	}
	cmd := &NodeDrainCommand{Meta: Meta{Ui: ui}}
	args := []string{"-address=" + url, "-self", "-enable", "-deadline", "1s", "-ignore-system"}
	t.Logf("Running: %v", args)
	require.Zero(cmd.Run(args))

	out := outBuf.String()
	t.Logf("Output:\n%s", out)

	// Unfortunately travis is too slow to reliably see the expected output. The
	// monitor goroutines may start only after some or all the allocs have been
	// migrated.
	if !testutil.IsTravis() {
		require.Contains(out, "Drain complete for node")
		for _, a := range allocs {
			if *a.Job.Type == "system" {
				// -ignore-system: system allocs must never appear in output.
				if strings.Contains(out, a.ID) {
					t.Fatalf("output should not contain system alloc %q", a.ID)
				}
				continue
			}
			require.Contains(out, fmt.Sprintf("Alloc %q marked for migration", a.ID))
			require.Contains(out, fmt.Sprintf("Alloc %q draining", a.ID))
		}

		expected := fmt.Sprintf("All allocations on node %q have stopped\n", nodeID)
		if !strings.HasSuffix(out, expected) {
			t.Fatalf("expected output to end with:\n%s", expected)
		}
	}

	// Test -monitor flag
	// The drain already completed above, so monitoring should report that no
	// drain strategy is currently set and exit 0.
	outBuf.Reset()
	args = []string{"-address=" + url, "-self", "-monitor", "-ignore-system"}
	t.Logf("Running: %v", args)
	require.Zero(cmd.Run(args))

	out = outBuf.String()
	t.Logf("Output:\n%s", out)
	require.Contains(out, "No drain strategy set")
}
   257  
   258  func TestNodeDrainCommand_Monitor_NoDrainStrategy(t *testing.T) {
   259  	t.Parallel()
   260  	require := require.New(t)
   261  	server, client, url := testServer(t, true, func(c *agent.Config) {
   262  		c.NodeName = "drain_monitor_node2"
   263  	})
   264  	defer server.Shutdown()
   265  
   266  	// Wait for a node to appear
   267  	testutil.WaitForResult(func() (bool, error) {
   268  		nodes, _, err := client.Nodes().List(nil)
   269  		if err != nil {
   270  			return false, err
   271  		}
   272  		if len(nodes) == 0 {
   273  			return false, fmt.Errorf("missing node")
   274  		}
   275  		return true, nil
   276  	}, func(err error) {
   277  		t.Fatalf("err: %s", err)
   278  	})
   279  
   280  	// Test -monitor flag
   281  	outBuf := bytes.NewBuffer(nil)
   282  	ui := &cli.BasicUi{
   283  		Reader:      bytes.NewReader(nil),
   284  		Writer:      outBuf,
   285  		ErrorWriter: outBuf,
   286  	}
   287  	cmd := &NodeDrainCommand{Meta: Meta{Ui: ui}}
   288  	args := []string{"-address=" + url, "-self", "-monitor", "-ignore-system"}
   289  	t.Logf("Running: %v", args)
   290  	if code := cmd.Run(args); code != 0 {
   291  		t.Fatalf("expected exit 0, got: %d\n%s", code, outBuf.String())
   292  	}
   293  
   294  	out := outBuf.String()
   295  	t.Logf("Output:\n%s", out)
   296  
   297  	require.Contains(out, "No drain strategy set")
   298  }
   299  
// TestNodeDrainCommand_Fails exercises the command's argument validation and
// error paths in sequence: misuse, unreachable server, unknown node IDs,
// conflicting flag combinations, and invalid deadlines. Each subcase expects
// exit code 1 and a specific substring in the error output, resetting the
// error buffer between cases.
func TestNodeDrainCommand_Fails(t *testing.T) {
	t.Parallel()
	srv, _, url := testServer(t, false, nil)
	defer srv.Shutdown()

	ui := new(cli.MockUi)
	cmd := &NodeDrainCommand{Meta: Meta{Ui: ui}}

	// Fails on misuse
	if code := cmd.Run([]string{"some", "bad", "args"}); code != 1 {
		t.Fatalf("expected exit code 1, got: %d", code)
	}
	if out := ui.ErrorWriter.String(); !strings.Contains(out, commandErrorText(cmd)) {
		t.Fatalf("expected help output, got: %s", out)
	}
	ui.ErrorWriter.Reset()

	// Fails on connection failure
	if code := cmd.Run([]string{"-address=nope", "-enable", "12345678-abcd-efab-cdef-123456789abc"}); code != 1 {
		t.Fatalf("expected exit code 1, got: %d", code)
	}
	if out := ui.ErrorWriter.String(); !strings.Contains(out, "Error toggling") {
		t.Fatalf("expected failed toggle error, got: %s", out)
	}
	ui.ErrorWriter.Reset()

	// Fails on nonexistent node
	if code := cmd.Run([]string{"-address=" + url, "-enable", "12345678-abcd-efab-cdef-123456789abc"}); code != 1 {
		t.Fatalf("expected exit 1, got: %d", code)
	}
	if out := ui.ErrorWriter.String(); !strings.Contains(out, "No node(s) with prefix or id") {
		t.Fatalf("expected not exist error, got: %s", out)
	}
	ui.ErrorWriter.Reset()

	// Fails if both enable and disable specified
	if code := cmd.Run([]string{"-enable", "-disable", "12345678-abcd-efab-cdef-123456789abc"}); code != 1 {
		t.Fatalf("expected exit 1, got: %d", code)
	}
	if out := ui.ErrorWriter.String(); !strings.Contains(out, commandErrorText(cmd)) {
		t.Fatalf("expected help output, got: %s", out)
	}
	ui.ErrorWriter.Reset()

	// Fails if neither enable or disable specified
	if code := cmd.Run([]string{"12345678-abcd-efab-cdef-123456789abc"}); code != 1 {
		t.Fatalf("expected exit 1, got: %d", code)
	}
	if out := ui.ErrorWriter.String(); !strings.Contains(out, commandErrorText(cmd)) {
		t.Fatalf("expected help output, got: %s", out)
	}
	ui.ErrorWriter.Reset()

	// Fail on identifier with too few characters
	if code := cmd.Run([]string{"-address=" + url, "-enable", "1"}); code != 1 {
		t.Fatalf("expected exit 1, got: %d", code)
	}
	if out := ui.ErrorWriter.String(); !strings.Contains(out, "must contain at least two characters.") {
		t.Fatalf("expected too few characters error, got: %s", out)
	}
	ui.ErrorWriter.Reset()

	// Identifiers with uneven length should produce a query result
	// (odd-length prefixes are padded server-side, so the lookup proceeds and
	// simply finds no matching node).
	if code := cmd.Run([]string{"-address=" + url, "-enable", "123"}); code != 1 {
		t.Fatalf("expected exit 1, got: %d", code)
	}
	if out := ui.ErrorWriter.String(); !strings.Contains(out, "No node(s) with prefix or id") {
		t.Fatalf("expected not exist error, got: %s", out)
	}
	ui.ErrorWriter.Reset()

	// Fail on disable being used with drain strategy flags
	for _, flag := range []string{"-force", "-no-deadline", "-ignore-system"} {
		if code := cmd.Run([]string{"-address=" + url, "-disable", flag, "12345678-abcd-efab-cdef-123456789abc"}); code != 1 {
			t.Fatalf("expected exit 1, got: %d", code)
		}
		if out := ui.ErrorWriter.String(); !strings.Contains(out, "combined with flags configuring drain strategy") {
			t.Fatalf("got: %s", out)
		}
		ui.ErrorWriter.Reset()
	}

	// Fail on setting a deadline plus deadline modifying flags
	for _, flag := range []string{"-force", "-no-deadline"} {
		if code := cmd.Run([]string{"-address=" + url, "-enable", "-deadline=10s", flag, "12345678-abcd-efab-cdef-123456789abc"}); code != 1 {
			t.Fatalf("expected exit 1, got: %d", code)
		}
		if out := ui.ErrorWriter.String(); !strings.Contains(out, "deadline can't be combined with") {
			t.Fatalf("got: %s", out)
		}
		ui.ErrorWriter.Reset()
	}

	// Fail on setting a force and no deadline
	if code := cmd.Run([]string{"-address=" + url, "-enable", "-force", "-no-deadline", "12345678-abcd-efab-cdef-123456789abc"}); code != 1 {
		t.Fatalf("expected exit 1, got: %d", code)
	}
	if out := ui.ErrorWriter.String(); !strings.Contains(out, "mutually exclusive") {
		t.Fatalf("got: %s", out)
	}
	ui.ErrorWriter.Reset()

	// Fail on setting a bad deadline
	for _, flag := range []string{"-deadline=0s", "-deadline=-1s"} {
		if code := cmd.Run([]string{"-address=" + url, "-enable", flag, "12345678-abcd-efab-cdef-123456789abc"}); code != 1 {
			t.Fatalf("expected exit 1, got: %d", code)
		}
		if out := ui.ErrorWriter.String(); !strings.Contains(out, "positive") {
			t.Fatalf("got: %s", out)
		}
		ui.ErrorWriter.Reset()
	}
}
   413  
   414  func TestNodeDrainCommand_AutocompleteArgs(t *testing.T) {
   415  	assert := assert.New(t)
   416  	t.Parallel()
   417  
   418  	srv, client, url := testServer(t, true, nil)
   419  	defer srv.Shutdown()
   420  
   421  	// Wait for a node to appear
   422  	var nodeID string
   423  	testutil.WaitForResult(func() (bool, error) {
   424  		nodes, _, err := client.Nodes().List(nil)
   425  		if err != nil {
   426  			return false, err
   427  		}
   428  		if len(nodes) == 0 {
   429  			return false, fmt.Errorf("missing node")
   430  		}
   431  		nodeID = nodes[0].ID
   432  		return true, nil
   433  	}, func(err error) {
   434  		t.Fatalf("err: %s", err)
   435  	})
   436  
   437  	ui := new(cli.MockUi)
   438  	cmd := &NodeDrainCommand{Meta: Meta{Ui: ui, flagAddress: url}}
   439  
   440  	prefix := nodeID[:len(nodeID)-5]
   441  	args := complete.Args{Last: prefix}
   442  	predictor := cmd.AutocompleteArgs()
   443  
   444  	res := predictor.Predict(args)
   445  	assert.Equal(1, len(res))
   446  	assert.Equal(nodeID, res[0])
   447  }