tools/cache/cluster/cluster_test.go

package cluster

import (
	"fmt"
	"net"
	"testing"

	"github.com/stretchr/testify/assert"
	"golang.org/x/net/context"
	"google.golang.org/grpc"

	pb "cache/proto/rpc_cache"
	"cache/tools"
)

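// TestBringUpCluster brings up a three-node cluster one node at a time,
// checking at each step that all nodes agree on the member list, then
// exercises artifact replication and deletion across the cluster.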
func TestBringUpCluster(t *testing.T) {
	lis := openRPCPort(6995)
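	// Note on ports: the 599x ports appear to be the gossip ports that peers
	// join on, while the 699x ports are the RPC ports advertised as each
	// node's address in the member list below.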
	c1 := NewCluster(5995, 6995, "c1", "")
	m1 := newRPCServer(c1, lis)
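	// Init seeds a fresh cluster; the argument is the expected cluster size,
	// which fixes the hash space division (cf. tools.HashPoint(_, 3) below).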
	c1.Init(3)
	log.Notice("Cluster seeded")

	lis = openRPCPort(6996)
	c2 := NewCluster(5996, 6996, "c2", "")
	m2 := newRPCServer(c2, lis)
	c2.Join([]string{"127.0.0.1:5995"})
	log.Notice("c2 joined cluster")

	expected := []*pb.Node{
		{
			Name:      "c1",
			Address:   "127.0.0.1:6995",
			HashBegin: tools.HashPoint(0, 3),
			HashEnd:   tools.HashPoint(1, 3),
		},
		{
			Name:      "c2",
			Address:   "127.0.0.1:6996",
			HashBegin: tools.HashPoint(1, 3),
			HashEnd:   tools.HashPoint(2, 3),
		},
	}
	// Both nodes should agree about the member list.
	assert.Equal(t, expected, c1.GetMembers())
	assert.Equal(t, expected, c2.GetMembers())

	lis = openRPCPort(6997)
	c3 := NewCluster(5997, 6997, "c3", "")
	m3 := newRPCServer(c3, lis)
	c3.Join([]string{"127.0.0.1:5995", "127.0.0.1:5996"})

	expected = []*pb.Node{
		{
			Name:      "c1",
			Address:   "127.0.0.1:6995",
			HashBegin: tools.HashPoint(0, 3),
			HashEnd:   tools.HashPoint(1, 3),
		},
		{
			Name:      "c2",
			Address:   "127.0.0.1:6996",
			HashBegin: tools.HashPoint(1, 3),
			HashEnd:   tools.HashPoint(2, 3),
		},
		{
			Name:      "c3",
			Address:   "127.0.0.1:6997",
			HashBegin: tools.HashPoint(2, 3),
			HashEnd:   tools.HashPoint(3, 3),
		},
	}

	// All three nodes should agree about the member list.
	assert.Equal(t, expected, c1.GetMembers())
	assert.Equal(t, expected, c2.GetMembers())
	assert.Equal(t, expected, c3.GetMembers())

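	// No replications should have happened yet.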
	assert.Equal(t, 0, m1.Replications)
	assert.Equal(t, 0, m2.Replications)
	assert.Equal(t, 0, m3.Replications)

	// Now test replications.
	c1.ReplicateArtifacts(&pb.StoreRequest{
		Hash: []byte{0, 0, 0, 0},
	})
	// This replicates onto node 2 because that's got the relevant bit of the hash space.
	assert.Equal(t, 0, m1.Replications)
	assert.Equal(t, 1, m2.Replications)
	assert.Equal(t, 0, m3.Replications)

	// The same request going to node 2 should replicate it onto node 1.
	c2.ReplicateArtifacts(&pb.StoreRequest{
		Hash: []byte{0, 0, 0, 0},
	})
	assert.Equal(t, 1, m1.Replications)
	assert.Equal(t, 1, m2.Replications)
	assert.Equal(t, 0, m3.Replications)

	// Delete requests should get replicated around the whole cluster (because they delete
	// all hashes of an artifact, and so those could be anywhere).
	c1.DeleteArtifacts(&pb.DeleteRequest{})
	assert.Equal(t, 1, m1.Replications)
	assert.Equal(t, 2, m2.Replications)
	assert.Equal(t, 1, m3.Replications)
	c2.DeleteArtifacts(&pb.DeleteRequest{})
	assert.Equal(t, 2, m1.Replications)
	assert.Equal(t, 2, m2.Replications)
	assert.Equal(t, 2, m3.Replications)
	c3.DeleteArtifacts(&pb.DeleteRequest{})
	assert.Equal(t, 3, m1.Replications)
	assert.Equal(t, 3, m2.Replications)
	assert.Equal(t, 2, m3.Replications)
}

// mockRPCServer is a fake RPC server we use for this test.
type mockRPCServer struct {
	cluster      *Cluster
	Replications int
}

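// Join implements the Join RPC by registering the new node with the wrapped cluster.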
func (r *mockRPCServer) Join(ctx context.Context, req *pb.JoinRequest) (*pb.JoinResponse, error) {
	return r.cluster.AddNode(req), nil
}

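// Replicate implements the Replicate RPC; it just counts calls so the test can
// assert how replications fan out across the cluster.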
func (r *mockRPCServer) Replicate(ctx context.Context, req *pb.ReplicateRequest) (*pb.ReplicateResponse, error) {
	r.Replications++
	return &pb.ReplicateResponse{Success: true}, nil
}

// openRPCPort opens a port for the gRPC server.
// This is rather awkwardly split out from newRPCServer below to avoid races around
// the port opening: there's something of a circular dependency between starting the
// gossip service (which triggers RPC calls) and starting the gRPC server (which
// refers to said gossip service).
func openRPCPort(port int) net.Listener {
	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		log.Fatalf("Failed to listen on port %d: %v", port, err)
	}
	return lis
}

// newRPCServer creates a new mockRPCServer, starts a gRPC server running it, and returns it.
// It's not possible to stop it again...
func newRPCServer(cluster *Cluster, lis net.Listener) *mockRPCServer {
	m := &mockRPCServer{cluster: cluster}
	s := grpc.NewServer()
	pb.RegisterRpcServerServer(s, m)
	go s.Serve(lis)
	return m
}