github.com/Ilhicas/nomad@v1.0.4-0.20210304152020-e86851182bc3/e2e/spread/spread.go

package spread

import (
	"fmt"
	"strings"

	"github.com/stretchr/testify/require"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/e2e/e2eutil"
	"github.com/hashicorp/nomad/e2e/framework"
	"github.com/hashicorp/nomad/helper/uuid"
)

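// SpreadTest is the e2e test case for the spread scheduling stanza. It
// registers jobs from spread/input/ and verifies that their allocations end
// up distributed across datacenters and racks as requested, recording each
// job ID so AfterEach can clean it up.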
type SpreadTest struct {
	framework.TC
	jobIds []string
}

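// init registers the Spread suite with the shared e2e framework so its cases
// run alongside the other end-to-end suites.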
func init() {
	framework.AddSuites(&framework.TestSuite{
		Component:   "Spread",
		CanRunLocal: true,
		Cases: []framework.TestCase{
			new(SpreadTest),
		},
	})
}

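// BeforeAll blocks until the cluster has elected a leader and at least four
// client nodes are ready, since the distribution assertions below assume that
// many placement targets exist.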
func (tc *SpreadTest) BeforeAll(f *framework.F) {
	// Ensure cluster has leader before running tests
	e2eutil.WaitForLeader(f.T(), tc.Nomad())
	e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 4)
}

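// TestEvenSpread registers a job that spreads its allocations evenly across
// two datacenters and verifies that exactly three allocations land in dc1 and
// three in dc2.
//
// The referenced job file is not reproduced in this source; a spread stanza
// that would produce this split might look roughly like the sketch below (an
// illustrative assumption, not the actual contents of
// spread/input/even_spread.nomad):
//
//	spread {
//	  attribute = "${node.datacenter}"
//	  target "dc1" {
//	    percent = 50
//	  }
//	  target "dc2" {
//	    percent = 50
//	  }
//	}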
func (tc *SpreadTest) TestEvenSpread(f *framework.F) {
	nomadClient := tc.Nomad()
	uuid := uuid.Generate()
	jobId := "spread" + uuid[0:8]
	tc.jobIds = append(tc.jobIds, jobId)
	allocs := e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient, "spread/input/even_spread.nomad", jobId, "")

	jobAllocs := nomadClient.Allocations()
	dcToAllocs := make(map[string]int)
	require := require.New(f.T())
	// Verify spread score and alloc distribution
	for _, allocStub := range allocs {
		alloc, _, err := jobAllocs.Info(allocStub.ID, nil)
		require.Nil(err)
		require.NotEmpty(alloc.Metrics.ScoreMetaData)

		node, _, err := nomadClient.Nodes().Info(alloc.NodeID, nil)
		require.Nil(err)
		dcToAllocs[node.Datacenter]++
	}

	expectedDcToAllocs := map[string]int{
		"dc1": 3,
		"dc2": 3,
	}
	require.Equal(expectedDcToAllocs, dcToAllocs)
}

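// TestMultipleSpreads registers a job that combines a datacenter spread with a
// spread over the node meta attribute "rack", then verifies the resulting
// distribution: five allocations per datacenter, seven on rack r1 and three on
// rack r2.
//
// As with the even-spread case, the job file itself is not shown here; a pair
// of spread stanzas consistent with these expectations might look roughly like
// the following (an illustrative assumption, not the actual contents of
// spread/input/multiple_spread.nomad):
//
//	spread {
//	  attribute = "${node.datacenter}"
//	  weight    = 50
//	}
//
//	spread {
//	  attribute = "${meta.rack}"
//	  weight    = 50
//	  target "r1" {
//	    percent = 70
//	  }
//	  target "r2" {
//	    percent = 30
//	  }
//	}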
func (tc *SpreadTest) TestMultipleSpreads(f *framework.F) {
	nomadClient := tc.Nomad()
	uuid := uuid.Generate()
	jobId := "spread" + uuid[0:8]
	tc.jobIds = append(tc.jobIds, jobId)
	allocs := e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient, "spread/input/multiple_spread.nomad", jobId, "")

	jobAllocs := nomadClient.Allocations()
	dcToAllocs := make(map[string]int)
	rackToAllocs := make(map[string]int)
	allocMetrics := make(map[string]*api.AllocationMetric)

	require := require.New(f.T())
	// Verify spread score and alloc distribution
	for _, allocStub := range allocs {
		alloc, _, err := jobAllocs.Info(allocStub.ID, nil)
		require.Nil(err)
		require.NotEmpty(alloc.Metrics.ScoreMetaData)
		allocMetrics[allocStub.ID] = alloc.Metrics

		node, _, err := nomadClient.Nodes().Info(alloc.NodeID, nil)
		require.Nil(err)
		dcToAllocs[node.Datacenter]++
		rack := node.Meta["rack"]
		if rack != "" {
			rackToAllocs[rack]++
		}
	}

	expectedDcToAllocs := map[string]int{
		"dc1": 5,
		"dc2": 5,
	}
	require.Equal(expectedDcToAllocs, dcToAllocs, report(allocMetrics))

	expectedRackToAllocs := map[string]int{
		"r1": 7,
		"r2": 3,
	}
	require.Equal(expectedRackToAllocs, rackToAllocs, report(allocMetrics))
}

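// report renders the scheduler's placement metrics for every allocation so
// that a failed distribution assertion shows why nodes were filtered,
// exhausted, or scored the way they were.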
func report(metrics map[string]*api.AllocationMetric) string {
	var s strings.Builder
	for allocID, m := range metrics {
		s.WriteString("Alloc ID: " + allocID + "\n")
		s.WriteString(fmt.Sprintf("  NodesEvaluated: %d\n", m.NodesEvaluated))
		s.WriteString(fmt.Sprintf("  NodesAvailable: %#v\n", m.NodesAvailable))
		s.WriteString(fmt.Sprintf("  ClassFiltered: %#v\n", m.ClassFiltered))
		s.WriteString(fmt.Sprintf("  ConstraintFiltered: %#v\n", m.ConstraintFiltered))
		s.WriteString(fmt.Sprintf("  NodesExhausted: %d\n", m.NodesExhausted))
		s.WriteString(fmt.Sprintf("  ClassExhausted: %#v\n", m.ClassExhausted))
		s.WriteString(fmt.Sprintf("  DimensionExhausted: %#v\n", m.DimensionExhausted))
		s.WriteString(fmt.Sprintf("  QuotaExhausted: %#v\n", m.QuotaExhausted))
		for _, nodeMeta := range m.ScoreMetaData {
			s.WriteString(fmt.Sprintf("    NodeID: %s, NormScore: %f, Scores: %#v\n",
				nodeMeta.NodeID, nodeMeta.NormScore, nodeMeta.Scores))
		}
	}
	return s.String()
}

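// AfterEach purges every job registered by the test case and then triggers a
// cluster garbage collection so later cases start from a clean state.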
func (tc *SpreadTest) AfterEach(f *framework.F) {
	nomadClient := tc.Nomad()
	jobs := nomadClient.Jobs()
	// Stop all jobs in test
	for _, id := range tc.jobIds {
		jobs.Deregister(id, true, nil)
	}
	// Garbage collect
	nomadClient.System().GarbageCollect()
}