github.com/hyperion-hyn/go-ethereum@v2.4.0+incompatible/raft/handler_test.go (about)

     1  package raft
     2  
     3  import (
     4  	"crypto/ecdsa"
     5  	"fmt"
     6  	"io/ioutil"
     7  	"net"
     8  	"os"
     9  	"reflect"
    10  	"testing"
    11  	"time"
    12  	"unsafe"
    13  
    14  	"github.com/ethereum/go-ethereum/core"
    15  	"github.com/ethereum/go-ethereum/crypto"
    16  	"github.com/ethereum/go-ethereum/eth"
    17  	"github.com/ethereum/go-ethereum/event"
    18  	"github.com/ethereum/go-ethereum/log"
    19  	"github.com/ethereum/go-ethereum/node"
    20  	"github.com/ethereum/go-ethereum/p2p"
    21  	"github.com/ethereum/go-ethereum/p2p/enode"
    22  	"github.com/ethereum/go-ethereum/params"
    23  )
    24  
    25  // pm.advanceAppliedIndex() and state updates are in different
    26  // transaction boundaries hence there's a probablity that they are
    27  // out of sync due to premature shutdown
    28  func IgnoreTestProtocolManager_whenAppliedIndexOutOfSync(t *testing.T) {
    29  	tmpWorkingDir, err := ioutil.TempDir("", "")
    30  	if err != nil {
    31  		t.Fatal(err)
    32  	}
    33  	defer func() {
    34  		_ = os.RemoveAll(tmpWorkingDir)
    35  	}()
    36  	count := 3
    37  	ports := make([]uint16, count)
    38  	nodeKeys := make([]*ecdsa.PrivateKey, count)
    39  	peers := make([]*enode.Node, count)
    40  	for i := 0; i < count; i++ {
    41  		ports[i] = nextPort(t)
    42  		nodeKeys[i] = mustNewNodeKey(t)
    43  		peers[i] = enode.NewV4(&(nodeKeys[i].PublicKey), net.IPv4(127, 0, 0, 1), 0, 0, int(ports[i]))
    44  	}
    45  	raftNodes := make([]*RaftService, count)
    46  	for i := 0; i < count; i++ {
    47  		if s, err := startRaftNode(uint16(i+1), ports[i], tmpWorkingDir, nodeKeys[i], peers); err != nil {
    48  			t.Fatal(err)
    49  		} else {
    50  			raftNodes[i] = s
    51  		}
    52  	}
    53  	waitFunc := func() {
    54  		for {
    55  			time.Sleep(200 * time.Millisecond)
    56  			for i := 0; i < count; i++ {
    57  				if raftNodes[i].raftProtocolManager.role == minterRole {
    58  					return
    59  				}
    60  			}
    61  		}
    62  	}
    63  	waitFunc()
    64  	// update the index to mimic the issue (set applied index behind for node 0)
    65  	raftNodes[0].raftProtocolManager.advanceAppliedIndex(1)
    66  	// now stop and restart the nodes
    67  	for i := 0; i < count; i++ {
    68  		if err := raftNodes[i].Stop(); err != nil {
    69  			t.Fatal(err)
    70  		}
    71  	}
    72  	log.Debug("restart raft cluster")
    73  	for i := 0; i < count; i++ {
    74  		if s, err := startRaftNode(uint16(i+1), ports[i], tmpWorkingDir, nodeKeys[i], peers); err != nil {
    75  			t.Fatal(err)
    76  		} else {
    77  			raftNodes[i] = s
    78  		}
    79  	}
    80  	waitFunc()
    81  }
    82  
    83  func mustNewNodeKey(t *testing.T) *ecdsa.PrivateKey {
    84  	k, err := crypto.GenerateKey()
    85  	if err != nil {
    86  		t.Fatal(err)
    87  	}
    88  	return k
    89  }
    90  
    91  func nextPort(t *testing.T) uint16 {
    92  	listener, err := net.Listen("tcp", ":0")
    93  	if err != nil {
    94  		t.Fatal(err)
    95  	}
    96  	return uint16(listener.Addr().(*net.TCPAddr).Port)
    97  }
    98  
// prepareServiceContext builds a minimal *node.ServiceContext (plus the
// *node.Config it carries) around the given P2P private key, for use when
// constructing services in tests. node.ServiceContext's config field is
// unexported, so it is populated via reflect + unsafe; if that reflection
// panics (e.g. the field is renamed in a future go-ethereum version), the
// deferred recover converts the panic into a returned error.
func prepareServiceContext(key *ecdsa.PrivateKey) (ctx *node.ServiceContext, cfg *node.Config, err error) {
	defer func() {
		if r := recover(); r != nil {
			// Surface the panic as an error and discard any partial results.
			err = fmt.Errorf("%s", r)
			ctx = nil
			cfg = nil
		}
	}()
	cfg = &node.Config{
		P2P: p2p.Config{
			PrivateKey: key,
		},
	}
	ctx = &node.ServiceContext{
		EventMux: new(event.TypeMux),
	}
	// config is private field so we need some workaround to set the value
	// reflect.NewAt over the field's address yields a writable view of the
	// unexported field (FieldByName alone returns a read-only Value).
	configField := reflect.ValueOf(ctx).Elem().FieldByName("config")
	configField = reflect.NewAt(configField.Type(), unsafe.Pointer(configField.UnsafeAddr())).Elem()
	configField.Set(reflect.ValueOf(cfg))
	return
}
   121  
   122  func startRaftNode(id, port uint16, tmpWorkingDir string, key *ecdsa.PrivateKey, nodes []*enode.Node) (*RaftService, error) {
   123  	datadir := fmt.Sprintf("%s/node%d", tmpWorkingDir, id)
   124  
   125  	ctx, _, err := prepareServiceContext(key)
   126  	if err != nil {
   127  		return nil, err
   128  	}
   129  
   130  	e, err := eth.New(ctx, &eth.Config{
   131  		Genesis: &core.Genesis{Config: params.QuorumTestChainConfig},
   132  	})
   133  	if err != nil {
   134  		return nil, err
   135  	}
   136  
   137  	s, err := New(ctx, params.QuorumTestChainConfig, id, port, false, 100*time.Millisecond, e, nodes, datadir, false)
   138  	if err != nil {
   139  		return nil, err
   140  	}
   141  
   142  	srv := &p2p.Server{
   143  		Config: p2p.Config{
   144  			PrivateKey: key,
   145  		},
   146  	}
   147  	if err := srv.Start(); err != nil {
   148  		return nil, fmt.Errorf("could not start: %v", err)
   149  	}
   150  	if err := s.Start(srv); err != nil {
   151  		return nil, err
   152  	}
   153  
   154  	return s, nil
   155  }