github.com/anth0d/nomad@v0.0.0-20221214183521-ae3a0a2cad06/client/servers/manager_internal_test.go

package servers

import (
	"fmt"
	"math/rand"
	"net"
	"testing"
	"time"

	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/helper/testlog"
)

func init() {
	// Seed the random number generator
	rand.Seed(time.Now().UnixNano())
}

// fauxAddr is a minimal net.Addr stub used by the tests in this file.
type fauxAddr struct {
	Addr string
}

func (fa *fauxAddr) String() string  { return fa.Addr }
func (fa *fauxAddr) Network() string { return fa.Addr }

// fauxConnPool is a stub connection pool whose Ping fails a configurable
// fraction of the time.
type fauxConnPool struct {
	// failPct between 0.0 and 1.0 == pct of time a Ping should fail
	failPct float64
}

func (cp *fauxConnPool) Ping(net.Addr) error {
	// Draw a value in [0, 1): the ping succeeds when the draw exceeds failPct,
	// so roughly failPct of calls return an error.
	if rand.Float64() > cp.failPct {
		return nil
	}
	return fmt.Errorf("bad server")
}
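
// What follows is a hedged sketch, not part of the upstream file: it exercises
// fauxConnPool directly to show that Ping fails at roughly the configured rate.
// The test name, sample size, and tolerance are illustrative assumptions.
func TestFauxConnPool_failPct_sketch(t *testing.T) {
	ci.Parallel(t)

	cp := &fauxConnPool{failPct: 0.5}
	const samples = 10000
	failures := 0
	for i := 0; i < samples; i++ {
		if err := cp.Ping(&fauxAddr{"server1"}); err != nil {
			failures++
		}
	}
	// With failPct = 0.5 roughly half the pings should fail; the tolerance is kept
	// very generous so the check stays stable across random seeds.
	if failures < samples/4 || failures > 3*samples/4 {
		t.Fatalf("unexpected failure count: %d/%d", failures, samples)
	}
}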

// testManager returns a Manager backed by a connection pool that never fails.
func testManager(t *testing.T) (m *Manager) {
	logger := testlog.HCLogger(t)
	shutdownCh := make(chan struct{})
	m = New(logger, shutdownCh, &fauxConnPool{})
	return m
}

// testManagerFailProb returns a Manager backed by a connection pool that fails
// roughly failPct of the time.
func testManagerFailProb(t *testing.T, failPct float64) (m *Manager) {
	logger := testlog.HCLogger(t)
	shutdownCh := make(chan struct{})
	m = New(logger, shutdownCh, &fauxConnPool{failPct: failPct})
	return m
}
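
// A minimal usage sketch (an assumption, not present upstream): testManagerFailProb
// is intended for tests that want a Manager whose connection pool is flaky. Here we
// only confirm that such a Manager constructs and accepts a server list.
func TestManagerInternal_testManagerFailProb_sketch(t *testing.T) {
	ci.Parallel(t)

	m := testManagerFailProb(t, 1.0)
	if m == nil {
		t.Fatalf("Manager nil")
	}
	m.SetServers([]*Server{{Addr: &fauxAddr{"server1"}}})
}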

func TestManagerInternal_cycleServer(t *testing.T) {
	ci.Parallel(t)

	server0 := &Server{Addr: &fauxAddr{"server1"}}
	server1 := &Server{Addr: &fauxAddr{"server2"}}
	server2 := &Server{Addr: &fauxAddr{"server3"}}
	srvs := Servers([]*Server{server0, server1, server2})

	srvs.cycle()
	if len(srvs) != 3 {
		t.Fatalf("server length incorrect: %d/3", len(srvs))
	}
	if srvs[0] != server1 ||
		srvs[1] != server2 ||
		srvs[2] != server0 {
		t.Fatalf("server ordering after one cycle not correct")
	}

	srvs.cycle()
	if srvs[0] != server2 ||
		srvs[1] != server0 ||
		srvs[2] != server1 {
		t.Fatalf("server ordering after two cycles not correct")
	}

	srvs.cycle()
	if srvs[0] != server0 ||
		srvs[1] != server1 ||
		srvs[2] != server2 {
		t.Fatalf("server ordering after three cycles not correct")
	}
}
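
// A property-style sketch (an assumption, not in the upstream file): cycling a list
// of N servers N times should restore the original ordering, complementing the
// fixed three-server checks above.
func TestManagerInternal_cycleServer_roundTrip_sketch(t *testing.T) {
	ci.Parallel(t)

	const n = 5
	srvs := make(Servers, 0, n)
	for i := 0; i < n; i++ {
		srvs = append(srvs, &Server{Addr: &fauxAddr{fmt.Sprintf("server%d", i)}})
	}
	orig := make(Servers, n)
	copy(orig, srvs)

	for i := 0; i < n; i++ {
		srvs.cycle()
	}
	for i := range orig {
		if srvs[i] != orig[i] {
			t.Fatalf("server %d out of place after %d cycles", i, n)
		}
	}
}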

func TestManagerInternal_New(t *testing.T) {
	ci.Parallel(t)

	m := testManager(t)
	if m == nil {
		t.Fatalf("Manager nil")
	}

	if m.logger == nil {
		t.Fatalf("Manager.logger nil")
	}

	if m.shutdownCh == nil {
		t.Fatalf("Manager.shutdownCh nil")
	}
}

// func (m *Manager) refreshServerRebalanceTimer() time.Duration
func TestManagerInternal_refreshServerRebalanceTimer(t *testing.T) {
	ci.Parallel(t)

	type clusterSizes struct {
		numNodes     int32
		numServers   int
		minRebalance time.Duration
	}
	clusters := []clusterSizes{
		{1, 0, 5 * time.Minute}, // partitioned cluster
		{1, 3, 5 * time.Minute},
		{2, 3, 5 * time.Minute},
		{100, 0, 5 * time.Minute}, // partitioned
		{100, 1, 5 * time.Minute}, // partitioned
		{100, 3, 5 * time.Minute},
		{1024, 1, 5 * time.Minute}, // partitioned
		{1024, 3, 5 * time.Minute}, // partitioned
		{1024, 5, 5 * time.Minute},
		{16384, 1, 4 * time.Minute}, // partitioned
		{16384, 2, 5 * time.Minute}, // partitioned
		{16384, 3, 5 * time.Minute}, // partitioned
		{16384, 5, 5 * time.Minute},
		{32768, 0, 5 * time.Minute}, // partitioned
		{32768, 1, 8 * time.Minute}, // partitioned
		{32768, 2, 3 * time.Minute}, // partitioned
		{32768, 3, 5 * time.Minute}, // partitioned
		{32768, 5, 3 * time.Minute}, // partitioned
		{65535, 7, 5 * time.Minute},
		{65535, 0, 5 * time.Minute}, // partitioned
		{65535, 1, 8 * time.Minute}, // partitioned
		{65535, 2, 3 * time.Minute}, // partitioned
		{65535, 3, 5 * time.Minute}, // partitioned
		{65535, 5, 3 * time.Minute}, // partitioned
		{65535, 7, 5 * time.Minute},
		{1000000, 1, 4 * time.Hour},     // partitioned
		{1000000, 2, 2 * time.Hour},     // partitioned
		{1000000, 3, 80 * time.Minute},  // partitioned
		{1000000, 5, 50 * time.Minute},  // partitioned
		{1000000, 11, 20 * time.Minute}, // partitioned
		{1000000, 19, 10 * time.Minute},
	}

	logger := testlog.HCLogger(t)
	shutdownCh := make(chan struct{})

	for _, s := range clusters {
		m := New(logger, shutdownCh, &fauxConnPool{})
		m.SetNumNodes(s.numNodes)
		servers := make([]*Server, 0, s.numServers)
		for i := 0; i < s.numServers; i++ {
			nodeName := fmt.Sprintf("s%02d", i)
			servers = append(servers, &Server{Addr: &fauxAddr{nodeName}})
		}
		m.SetServers(servers)

		d := m.refreshServerRebalanceTimer()
		t.Logf("Nodes: %d; Servers: %d; Refresh: %v; Min: %v", s.numNodes, s.numServers, d, s.minRebalance)
		if d < s.minRebalance {
			t.Errorf("duration too short for cluster of size %d and %d servers (%s < %s)", s.numNodes, s.numServers, d, s.minRebalance)
		}
	}
}
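
// A follow-on sketch (an assumption, not in the upstream file): the zero-server
// ("partitioned") rows above imply refreshServerRebalanceTimer must cope with an
// empty server list, so at minimum it should return a positive interval.
func TestManagerInternal_refreshServerRebalanceTimer_noServers_sketch(t *testing.T) {
	ci.Parallel(t)

	m := testManager(t)
	m.SetNumNodes(1)
	m.SetServers([]*Server{})

	if d := m.refreshServerRebalanceTimer(); d <= 0 {
		t.Fatalf("expected a positive rebalance interval, got %v", d)
	}
}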