github.com/anuvu/nomad@v0.8.7-atom1/api/nodes_test.go

package api

import (
	"context"
	"fmt"
	"reflect"
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/require"
)

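// TestNodes_List verifies that listing nodes against a dev-mode test server
// eventually returns the single self-registered node and valid query metadata.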
func TestNodes_List(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
		c.DevMode = true
	})
	defer s.Stop()
	nodes := c.Nodes()

	var qm *QueryMeta
	var out []*NodeListStub
	var err error

	testutil.WaitForResult(func() (bool, error) {
		out, qm, err = nodes.List(nil)
		if err != nil {
			return false, err
		}
		if n := len(out); n != 1 {
			return false, fmt.Errorf("expected 1 node, got: %d", n)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// Check that we got valid QueryMeta.
	assertQueryMeta(t, qm)
}

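// TestNodes_PrefixList verifies that a registered node can be found by a
// short prefix of its ID.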
func TestNodes_PrefixList(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
		c.DevMode = true
	})
	defer s.Stop()
	nodes := c.Nodes()

	var qm *QueryMeta
	var out []*NodeListStub
	var err error

	// Get the node ID
	var nodeID string
	testutil.WaitForResult(func() (bool, error) {
		out, _, err := nodes.List(nil)
		if err != nil {
			return false, err
		}
		if n := len(out); n != 1 {
			return false, fmt.Errorf("expected 1 node, got: %d", n)
		}
		nodeID = out[0].ID
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// Find the node based on a four-character ID prefix
	out, qm, err = nodes.PrefixList(nodeID[:4])
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if n := len(out); n != 1 {
		t.Fatalf("expected 1 node, got: %d", n)
	}

	// Check that we got valid QueryMeta.
	assertQueryMeta(t, qm)
}

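// TestNodes_Info verifies that querying an unknown node ID returns a
// not-found error, and that querying the registered node returns its ID,
// datacenter, status timestamp, and at least the registration event.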
func TestNodes_Info(t *testing.T) {
	t.Parallel()
	startTime := time.Now().Unix()
	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
		c.DevMode = true
	})
	defer s.Stop()
	nodes := c.Nodes()

	// Retrieving a nonexistent node returns an error
	_, _, err := nodes.Info("12345678-abcd-efab-cdef-123456789abc", nil)
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("expected not found error, got: %#v", err)
	}

	// Get the node ID
	var nodeID, dc string
	testutil.WaitForResult(func() (bool, error) {
		out, _, err := nodes.List(nil)
		if err != nil {
			return false, err
		}
		if n := len(out); n != 1 {
			return false, fmt.Errorf("expected 1 node, got: %d", n)
		}
		nodeID = out[0].ID
		dc = out[0].Datacenter
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// Querying the existing node returns it properly
	result, qm, err := nodes.Info(nodeID, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	assertQueryMeta(t, qm)

	// Check that the result is what we expect
	if result.ID != nodeID || result.Datacenter != dc {
		t.Fatalf("expected %s (%s), got: %s (%s)",
			nodeID, dc,
			result.ID, result.Datacenter)
	}

	// Check that the StatusUpdatedAt field is being populated correctly
	if result.StatusUpdatedAt < startTime {
		t.Fatalf("start time: %v, status updated: %v", startTime, result.StatusUpdatedAt)
	}

	if len(result.Events) < 1 {
		t.Fatalf("expected at least the node registration event to be populated: %+v", result)
	}
}

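// TestNodes_ToggleDrain verifies that enabling a drain marks the node
// ineligible for scheduling, and that disabling it clears the drain strategy
// and restores eligibility.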
func TestNodes_ToggleDrain(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
		c.DevMode = true
	})
	defer s.Stop()
	nodes := c.Nodes()

	// Wait for node registration and get the ID
	var nodeID string
	testutil.WaitForResult(func() (bool, error) {
		out, _, err := nodes.List(nil)
		if err != nil {
			return false, err
		}
		if n := len(out); n != 1 {
			return false, fmt.Errorf("expected 1 node, got: %d", n)
		}
		nodeID = out[0].ID
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// Check for drain mode
	out, _, err := nodes.Info(nodeID, nil)
	require.Nil(err)
	if out.Drain {
		t.Fatalf("drain mode should be off")
	}

	// Toggle it on
	spec := &DrainSpec{
		Deadline: 10 * time.Second,
	}
	drainOut, err := nodes.UpdateDrain(nodeID, spec, false, nil)
	require.Nil(err)
	assertWriteMeta(t, &drainOut.WriteMeta)

	// Check again
	out, _, err = nodes.Info(nodeID, nil)
	require.Nil(err)
	if out.SchedulingEligibility != structs.NodeSchedulingIneligible {
		t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, structs.NodeSchedulingIneligible)
	}

	// Toggle off again
	drainOut, err = nodes.UpdateDrain(nodeID, nil, true, nil)
	require.Nil(err)
	assertWriteMeta(t, &drainOut.WriteMeta)

	// Check again
	out, _, err = nodes.Info(nodeID, nil)
	require.Nil(err)
	if out.Drain {
		t.Fatalf("drain mode should be off")
	}
	if out.DrainStrategy != nil {
		t.Fatalf("drain strategy should be unset")
	}
	if out.SchedulingEligibility != structs.NodeSchedulingEligible {
		t.Fatalf("should be eligible")
	}
}

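// TestNodes_ToggleEligibility verifies that a node's scheduling eligibility
// can be toggled off and back on without setting a drain strategy.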
func TestNodes_ToggleEligibility(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
		c.DevMode = true
	})
	defer s.Stop()
	nodes := c.Nodes()

	// Wait for node registration and get the ID
	var nodeID string
	testutil.WaitForResult(func() (bool, error) {
		out, _, err := nodes.List(nil)
		if err != nil {
			return false, err
		}
		if n := len(out); n != 1 {
			return false, fmt.Errorf("expected 1 node, got: %d", n)
		}
		nodeID = out[0].ID
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// Check for eligibility
	out, _, err := nodes.Info(nodeID, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if out.SchedulingEligibility != structs.NodeSchedulingEligible {
		t.Fatalf("node should be eligible")
	}

	// Toggle it off
	eligOut, err := nodes.ToggleEligibility(nodeID, false, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	assertWriteMeta(t, &eligOut.WriteMeta)

	// Check again
	out, _, err = nodes.Info(nodeID, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if out.SchedulingEligibility != structs.NodeSchedulingIneligible {
		t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, structs.NodeSchedulingIneligible)
	}

	// Toggle on
	eligOut, err = nodes.ToggleEligibility(nodeID, true, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	assertWriteMeta(t, &eligOut.WriteMeta)

	// Check again
	out, _, err = nodes.Info(nodeID, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if out.SchedulingEligibility != structs.NodeSchedulingEligible {
		t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, structs.NodeSchedulingEligible)
	}
	if out.DrainStrategy != nil {
		t.Fatalf("drain strategy should be unset")
	}
}

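// TestNodes_Allocations verifies that listing allocations for an unknown node
// returns an empty result rather than an error.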
func TestNodes_Allocations(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t, nil, nil)
	defer s.Stop()
	nodes := c.Nodes()

	// Looking up allocations for a nonexistent node returns nothing. We don't
	// check the index here because it's possible the node has already
	// registered, in which case we will get a non-zero result anyway.
	allocs, _, err := nodes.Allocations("nope", nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if n := len(allocs); n != 0 {
		t.Fatalf("expected 0 allocs, got: %d", n)
	}
}

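// TestNodes_ForceEvaluate verifies that forcing an evaluation fails with a
// not-found error for an unknown node and succeeds once the node has registered.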
func TestNodes_ForceEvaluate(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
		c.DevMode = true
	})
	defer s.Stop()
	nodes := c.Nodes()

	// Force-eval on a nonexistent node fails
	_, _, err := nodes.ForceEvaluate("12345678-abcd-efab-cdef-123456789abc", nil)
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("expected not found error, got: %#v", err)
	}

	// Wait for node registration and get the ID
	var nodeID string
	testutil.WaitForResult(func() (bool, error) {
		out, _, err := nodes.List(nil)
		if err != nil {
			return false, err
		}
		if n := len(out); n != 1 {
			return false, fmt.Errorf("expected 1 node, got: %d", n)
		}
		nodeID = out[0].ID
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// Try force-eval again. We don't check the WriteMeta because
	// there are no allocations to process, so we would get an index
	// of zero. Same goes for the eval ID.
	_, _, err = nodes.ForceEvaluate(nodeID, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
}

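// TestNodes_Sort verifies that NodeIndexSort orders node stubs by CreateIndex
// in descending order.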
func TestNodes_Sort(t *testing.T) {
	t.Parallel()
	nodes := []*NodeListStub{
		{CreateIndex: 2},
		{CreateIndex: 1},
		{CreateIndex: 5},
	}
	sort.Sort(NodeIndexSort(nodes))

	expect := []*NodeListStub{
		{CreateIndex: 5},
		{CreateIndex: 2},
		{CreateIndex: 1},
	}
	if !reflect.DeepEqual(nodes, expect) {
		t.Fatalf("got:\n%#v\n\nexpected:\n%#v", nodes, expect)
	}
}

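// TestNodes_GC verifies that garbage-collecting an unknown node returns an
// unknown-node error.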
func TestNodes_GC(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	c, s := makeClient(t, nil, nil)
	defer s.Stop()
	nodes := c.Nodes()

	err := nodes.GC(uuid.Generate(), nil)
	require.NotNil(err)
	require.True(structs.IsErrUnknownNode(err))
}

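// TestNodes_GcAlloc verifies that garbage-collecting an unknown allocation
// returns an unknown-allocation error.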
func TestNodes_GcAlloc(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	c, s := makeClient(t, nil, nil)
	defer s.Stop()
	nodes := c.Nodes()

	err := nodes.GcAlloc(uuid.Generate(), nil)
	require.NotNil(err)
	require.True(structs.IsErrUnknownAllocation(err))
}

// TestNodes_MonitorDrain_Multiplex_Bad is a unit test of monitorDrainMultiplex
// in the error case: an error-level message should be forwarded and then shut
// the multiplexer down.
func TestNodes_MonitorDrain_Multiplex_Bad(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	ctx := context.Background()
	multiplexCtx, cancel := context.WithCancel(ctx)

	// monitorDrainMultiplex doesn't require anything on *Nodes, so we
	// don't need to use a full Client
	var nodeClient *Nodes

	outCh := make(chan *MonitorMessage, 8)
	nodeCh := make(chan *MonitorMessage, 1)
	allocCh := make(chan *MonitorMessage, 8)
	exitedCh := make(chan struct{})
	go func() {
		defer close(exitedCh)
		nodeClient.monitorDrainMultiplex(ctx, cancel, outCh, nodeCh, allocCh)
	}()

	// Fake an alloc update
	msg := Messagef(0, "alloc update")
	allocCh <- msg
	require.Equal(msg, <-outCh)

	// Fake a node update
	msg = Messagef(0, "node update")
	nodeCh <- msg
	require.Equal(msg, <-outCh)

	// Fake an error that should shut everything down
	msg = Messagef(MonitorMsgLevelError, "fake error")
	nodeCh <- msg
	require.Equal(msg, <-outCh)

	_, ok := <-exitedCh
	require.False(ok)

	_, ok = <-outCh
	require.False(ok)

	// Exiting should also cancel the context that would be passed to the
	// node & alloc watchers
	select {
	case <-multiplexCtx.Done():
	case <-time.After(100 * time.Millisecond):
		t.Fatalf("context wasn't canceled")
	}
}

// TestNodes_MonitorDrain_Multiplex_Good is a unit test of monitorDrainMultiplex
// in the success case: the multiplexer keeps running after the node monitor
// finishes and exits cleanly once the alloc monitor finishes too.
func TestNodes_MonitorDrain_Multiplex_Good(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	ctx := context.Background()
	multiplexCtx, cancel := context.WithCancel(ctx)

	// monitorDrainMultiplex doesn't require anything on *Nodes, so we
	// don't need to use a full Client
	var nodeClient *Nodes

	outCh := make(chan *MonitorMessage, 8)
	nodeCh := make(chan *MonitorMessage, 1)
	allocCh := make(chan *MonitorMessage, 8)
	exitedCh := make(chan struct{})
	go func() {
		defer close(exitedCh)
		nodeClient.monitorDrainMultiplex(ctx, cancel, outCh, nodeCh, allocCh)
	}()

	// Fake a node updating and finishing
	msg := Messagef(MonitorMsgLevelInfo, "node update")
	nodeCh <- msg
	close(nodeCh)
	require.Equal(msg, <-outCh)

	// Nothing else should have exited yet
	select {
	case msg, ok := <-outCh:
		if ok {
			t.Fatalf("unexpected output: %q", msg)
		}
		t.Fatalf("out channel closed unexpectedly")
	case <-exitedCh:
		t.Fatalf("multiplexer exited unexpectedly")
	case <-multiplexCtx.Done():
		t.Fatalf("multiplexer context canceled unexpectedly")
	case <-time.After(10 * time.Millisecond):
		t.Logf("multiplexer still running as expected")
	}

	// Fake an alloc update coming in after the node monitor has finished
	msg = Messagef(0, "alloc update")
	allocCh <- msg
	require.Equal(msg, <-outCh)

	// Closing the allocCh should cause everything to exit
	close(allocCh)

	_, ok := <-exitedCh
	require.False(ok)

	_, ok = <-outCh
	require.False(ok)

	// Exiting should also cancel the context that would be passed to the
	// node & alloc watchers
	select {
	case <-multiplexCtx.Done():
	case <-time.After(100 * time.Millisecond):
		t.Fatalf("context wasn't canceled")
	}
}

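// TestNodes_DrainStrategy_Equal exercises DrainStrategy.Equal across nil
// receivers and each of its fields.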
func TestNodes_DrainStrategy_Equal(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// nil
	var d *DrainStrategy
	require.True(d.Equal(nil))

	o := &DrainStrategy{}
	require.False(d.Equal(o))
	require.False(o.Equal(d))

	d = &DrainStrategy{}
	require.True(d.Equal(o))

	// ForceDeadline
	d.ForceDeadline = time.Now()
	require.False(d.Equal(o))

	o.ForceDeadline = d.ForceDeadline
	require.True(d.Equal(o))

	// Deadline
	d.Deadline = 1
	require.False(d.Equal(o))

	o.Deadline = 1
	require.True(d.Equal(o))

	// IgnoreSystemJobs
	d.IgnoreSystemJobs = true
	require.False(d.Equal(o))

	o.IgnoreSystemJobs = true
	require.True(d.Equal(o))
}
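
// The tests above exercise the Nodes API piecemeal; the function below is a
// minimal, illustrative sketch (not part of the original file and not invoked
// by any test) of how the same client calls fit together: build a client from
// the default config, list the registered nodes, and fetch details for each.
// It assumes a Nomad agent is reachable at the default address; the function
// name is purely illustrative.
func nodesUsageSketch() error {
	client, err := NewClient(DefaultConfig())
	if err != nil {
		return err
	}
	nodes := client.Nodes()

	// List all registered nodes.
	stubs, _, err := nodes.List(nil)
	if err != nil {
		return err
	}

	// Fetch full details for each node stub and print a one-line summary.
	for _, stub := range stubs {
		node, _, err := nodes.Info(stub.ID, nil)
		if err != nil {
			return err
		}
		fmt.Printf("node %s (%s) status=%s\n", node.ID, node.Datacenter, node.Status)
	}
	return nil
}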