github.com/tiagovtristao/plz@v13.4.0+incompatible/tools/cache/cluster/cluster_test.go

package cluster

import (
	"fmt"
	"net"
	"testing"

	"github.com/stretchr/testify/assert"
	"golang.org/x/net/context"
	"google.golang.org/grpc"

	pb "github.com/thought-machine/please/src/cache/proto/rpc_cache"
	"github.com/thought-machine/please/src/cache/tools"
)

func TestBringUpCluster(t *testing.T) {
	lis := openRPCPort(6995)
	c1 := NewCluster(5995, 6995, "c1", "")
	m1 := newRPCServer(c1, lis)
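	// Initialise the seed node with a cluster size of 3; the expected hash
	// ranges asserted below split the hash space into thirds to match.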
	c1.Init(3)
	log.Notice("Cluster seeded")

	lis = openRPCPort(6996)
	c2 := NewCluster(5996, 6996, "c2", "")
	m2 := newRPCServer(c2, lis)
	c2.Join([]string{"127.0.0.1:5995"})
	log.Notice("c2 joined cluster")

	// We want to get the address of the nodes; on a typical machine there are multiple
	// available interfaces and memberlist will intelligently choose one. We don't want to
	// try to second-guess their logic here so we sneakily grab whatever it thinks the
	// local node's address is.
	addr := c1.list.LocalNode().Addr.String()

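	// Each node should own a contiguous slice of the hash space, running from
	// tools.HashPoint(i, 3) to tools.HashPoint(i+1, 3).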
	expected := []*pb.Node{
		{
			Name:      "c1",
			Address:   addr + ":6995",
			HashBegin: tools.HashPoint(0, 3),
			HashEnd:   tools.HashPoint(1, 3),
		},
		{
			Name:      "c2",
			Address:   addr + ":6996",
			HashBegin: tools.HashPoint(1, 3),
			HashEnd:   tools.HashPoint(2, 3),
		},
	}
	// Both nodes should agree about the member list
	assert.Equal(t, expected, c1.GetMembers())
	assert.Equal(t, expected, c2.GetMembers())

	lis = openRPCPort(6997)
	c3 := NewCluster(5997, 6997, "c3", "")
	m3 := newRPCServer(c3, lis)
	c3.Join([]string{"127.0.0.1:5995", "127.0.0.1:5996"})

	expected = []*pb.Node{
		{
			Name:      "c1",
			Address:   addr + ":6995",
			HashBegin: tools.HashPoint(0, 3),
			HashEnd:   tools.HashPoint(1, 3),
		},
		{
			Name:      "c2",
			Address:   addr + ":6996",
			HashBegin: tools.HashPoint(1, 3),
			HashEnd:   tools.HashPoint(2, 3),
		},
		{
			Name:      "c3",
			Address:   addr + ":6997",
			HashBegin: tools.HashPoint(2, 3),
			HashEnd:   tools.HashPoint(3, 3),
		},
	}

	// All three nodes should agree about the member list
	assert.Equal(t, expected, c1.GetMembers())
	assert.Equal(t, expected, c2.GetMembers())
	assert.Equal(t, expected, c3.GetMembers())

	assert.Equal(t, 0, m1.Replications)
	assert.Equal(t, 0, m2.Replications)
	assert.Equal(t, 0, m3.Replications)

	// Now test replications.
	c1.ReplicateArtifacts(&pb.StoreRequest{
		Hash: []byte{0, 0, 0, 0},
	})
	// This replicates onto node 2 because that's got the relevant bit of the hash space.
	assert.Equal(t, 0, m1.Replications)
	assert.Equal(t, 1, m2.Replications)
	assert.Equal(t, 0, m3.Replications)

	// The same request going to node 2 should replicate it onto node 1.
	c2.ReplicateArtifacts(&pb.StoreRequest{
		Hash: []byte{0, 0, 0, 0},
	})
	assert.Equal(t, 1, m1.Replications)
	assert.Equal(t, 1, m2.Replications)
	assert.Equal(t, 0, m3.Replications)

	// Delete requests should get replicated around the whole cluster (because they delete
	// all hashes of an artifact, and so those could be anywhere).
	c1.DeleteArtifacts(&pb.DeleteRequest{})
	assert.Equal(t, 1, m1.Replications)
	assert.Equal(t, 2, m2.Replications)
	assert.Equal(t, 1, m3.Replications)
	c2.DeleteArtifacts(&pb.DeleteRequest{})
	assert.Equal(t, 2, m1.Replications)
	assert.Equal(t, 2, m2.Replications)
	assert.Equal(t, 2, m3.Replications)
	c3.DeleteArtifacts(&pb.DeleteRequest{})
	assert.Equal(t, 3, m1.Replications)
	assert.Equal(t, 3, m2.Replications)
	assert.Equal(t, 2, m3.Replications)
}

// mockRPCServer is a fake RPC server we use for this test.
type mockRPCServer struct {
	cluster      *Cluster
	Replications int
}

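// Join handles a Join request from a new node by adding it to the wrapped cluster.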
func (r *mockRPCServer) Join(ctx context.Context, req *pb.JoinRequest) (*pb.JoinResponse, error) {
	return r.cluster.AddNode(req), nil
}

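// Replicate just counts incoming replication requests so the test can assert on where artifacts get replicated.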
func (r *mockRPCServer) Replicate(ctx context.Context, req *pb.ReplicateRequest) (*pb.ReplicateResponse, error) {
	r.Replications++
	return &pb.ReplicateResponse{Success: true}, nil
}

// openRPCPort opens a port for the gRPC server.
// This is rather awkwardly split up from below to try to avoid races around the port opening.
// There's something of a circular dependency between starting the gossip service (which triggers
// RPC calls) and starting the gRPC server (which refers to said gossip service).
func openRPCPort(port int) net.Listener {
	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		log.Fatalf("Failed to listen on port %d: %v", port, err)
	}
	return lis
}

// newRPCServer creates a new mockRPCServer, starts a gRPC server running it, and returns it.
// It's not possible to stop it again...
func newRPCServer(cluster *Cluster, lis net.Listener) *mockRPCServer {
	m := &mockRPCServer{cluster: cluster}
	s := grpc.NewServer()
	pb.RegisterRpcServerServer(s, m)
	go s.Serve(lis)
	return m
}