github.com/djenriquez/nomad-1@v0.8.1/client/servers/manager_internal_test.go

package servers

import (
	"fmt"
	"log"
	"math/rand"
	"net"
	"os"
	"testing"
	"time"
)

func init() {
	// Seed the random number generator
	rand.Seed(time.Now().UnixNano())
}

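// fauxAddr is a stub net.Addr for tests; both String and Network return the
// configured Addr string.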
type fauxAddr struct {
	Addr string
}

func (fa *fauxAddr) String() string  { return fa.Addr }
func (fa *fauxAddr) Network() string { return fa.Addr }

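// fauxConnPool is a stub connection pool whose Ping fails a configurable
// fraction of the time.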
type fauxConnPool struct {
	// failPct, between 0.0 and 1.0, is the fraction of Ping calls that should fail
	failPct float64
}

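// Ping simulates a server health check, failing roughly failPct of the time.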
func (cp *fauxConnPool) Ping(net.Addr) error {
	successProb := rand.Float64()
	if successProb > cp.failPct {
		return nil
	}
	return fmt.Errorf("bad server")
}

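// testManager returns a Manager backed by a fauxConnPool whose pings always succeed.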
func testManager(t *testing.T) (m *Manager) {
	logger := log.New(os.Stderr, "", 0)
	shutdownCh := make(chan struct{})
	m = New(logger, shutdownCh, &fauxConnPool{})
	return m
}

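// testManagerFailProb returns a Manager backed by a fauxConnPool whose pings
// fail with probability failPct.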
func testManagerFailProb(failPct float64) (m *Manager) {
	logger := log.New(os.Stderr, "", 0)
	shutdownCh := make(chan struct{})
	m = New(logger, shutdownCh, &fauxConnPool{failPct: failPct})
	return m
}

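// TestManagerInternal_cycleServer verifies that cycle rotates the server list
// one position to the left on each call.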
func TestManagerInternal_cycleServer(t *testing.T) {
	server0 := &Server{Addr: &fauxAddr{"server1"}}
	server1 := &Server{Addr: &fauxAddr{"server2"}}
	server2 := &Server{Addr: &fauxAddr{"server3"}}
	srvs := Servers([]*Server{server0, server1, server2})

	srvs.cycle()
	if len(srvs) != 3 {
		t.Fatalf("server length incorrect: %d/3", len(srvs))
	}
	if srvs[0] != server1 ||
		srvs[1] != server2 ||
		srvs[2] != server0 {
		t.Fatalf("server ordering after one cycle not correct")
	}

	srvs.cycle()
	if srvs[0] != server2 ||
		srvs[1] != server0 ||
		srvs[2] != server1 {
		t.Fatalf("server ordering after two cycles not correct")
	}

	srvs.cycle()
	if srvs[0] != server0 ||
		srvs[1] != server1 ||
		srvs[2] != server2 {
		t.Fatalf("server ordering after three cycles not correct")
	}
}

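// TestManagerInternal_New checks that New returns a Manager with its logger
// and shutdown channel populated.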
func TestManagerInternal_New(t *testing.T) {
	m := testManager(t)
	if m == nil {
		t.Fatalf("Manager nil")
	}

	if m.logger == nil {
		t.Fatalf("Manager.logger nil")
	}

	if m.shutdownCh == nil {
		t.Fatalf("Manager.shutdownCh nil")
	}
}

// func (m *Manager) refreshServerRebalanceTimer() time.Duration
func TestManagerInternal_refreshServerRebalanceTimer(t *testing.T) {
	type clusterSizes struct {
		numNodes     int32
		numServers   int
		minRebalance time.Duration
	}
	// Each case records the minimum acceptable rebalance interval for a
	// cluster with numNodes client nodes and numServers servers.
	clusters := []clusterSizes{
		{1, 0, 5 * time.Minute}, // partitioned cluster
		{1, 3, 5 * time.Minute},
		{2, 3, 5 * time.Minute},
		{100, 0, 5 * time.Minute}, // partitioned
		{100, 1, 5 * time.Minute}, // partitioned
		{100, 3, 5 * time.Minute},
		{1024, 1, 5 * time.Minute}, // partitioned
		{1024, 3, 5 * time.Minute}, // partitioned
		{1024, 5, 5 * time.Minute},
		{16384, 1, 4 * time.Minute}, // partitioned
		{16384, 2, 5 * time.Minute}, // partitioned
		{16384, 3, 5 * time.Minute}, // partitioned
		{16384, 5, 5 * time.Minute},
		{32768, 0, 5 * time.Minute}, // partitioned
		{32768, 1, 8 * time.Minute}, // partitioned
		{32768, 2, 3 * time.Minute}, // partitioned
		{32768, 3, 5 * time.Minute}, // partitioned
		{32768, 5, 3 * time.Minute}, // partitioned
		{65535, 7, 5 * time.Minute},
		{65535, 0, 5 * time.Minute}, // partitioned
		{65535, 1, 8 * time.Minute}, // partitioned
		{65535, 2, 3 * time.Minute}, // partitioned
		{65535, 3, 5 * time.Minute}, // partitioned
		{65535, 5, 3 * time.Minute}, // partitioned
		{65535, 7, 5 * time.Minute},
		{1000000, 1, 4 * time.Hour},     // partitioned
		{1000000, 2, 2 * time.Hour},     // partitioned
		{1000000, 3, 80 * time.Minute},  // partitioned
		{1000000, 5, 50 * time.Minute},  // partitioned
		{1000000, 11, 20 * time.Minute}, // partitioned
		{1000000, 19, 10 * time.Minute},
	}

	logger := log.New(os.Stderr, "", log.LstdFlags)
	shutdownCh := make(chan struct{})

	for _, s := range clusters {
		m := New(logger, shutdownCh, &fauxConnPool{})
		m.SetNumNodes(s.numNodes)
		servers := make([]*Server, 0, s.numServers)
		for i := 0; i < s.numServers; i++ {
			nodeName := fmt.Sprintf("s%02d", i)
			servers = append(servers, &Server{Addr: &fauxAddr{nodeName}})
		}
		m.SetServers(servers)

		d := m.refreshServerRebalanceTimer()
		t.Logf("Nodes: %d; Servers: %d; Refresh: %v; Min: %v", s.numNodes, s.numServers, d, s.minRebalance)
		if d < s.minRebalance {
			t.Errorf("duration too short for cluster of size %d and %d servers (%s < %s)", s.numNodes, s.numServers, d, s.minRebalance)
		}
	}
}