github.com/kisexp/xdchain@v0.0.0-20211206025815-490d6b732aa7/cmd/geth/les_test.go

package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/kisexp/xdchain/p2p"
	"github.com/kisexp/xdchain/rpc"
)

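// gethrpc wraps a running geth test node together with an attached RPC client
// and a lazily cached admin_nodeInfo result.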
type gethrpc struct {
	name     string
	rpc      *rpc.Client
	geth     *testgeth
	nodeInfo *p2p.NodeInfo
}

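// killAndWait terminates the node's process and blocks until it has exited.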
func (g *gethrpc) killAndWait() {
	g.geth.Kill()
	g.geth.WaitExit()
}

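// callRPC invokes the given RPC method and decodes the reply into result (pass
// nil to discard the reply); any error fails the test immediately.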
func (g *gethrpc) callRPC(result interface{}, method string, args ...interface{}) {
	if err := g.rpc.Call(&result, method, args...); err != nil {
		g.geth.Fatalf("callRPC %v: %v", method, err)
	}
}

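// addPeer connects g to peer via admin_addPeer and waits for the first
// resulting peer event (or a timeout) before returning, so callers can rely on
// the connection having been attempted by the time this returns.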
func (g *gethrpc) addPeer(peer *gethrpc) {
	g.geth.Logf("%v.addPeer(%v)", g.name, peer.name)
	enode := peer.getNodeInfo().Enode
	peerCh := make(chan *p2p.PeerEvent)
	sub, err := g.rpc.Subscribe(context.Background(), "admin", peerCh, "peerEvents")
	if err != nil {
		g.geth.Fatalf("subscribe %v: %v", g.name, err)
	}
	defer sub.Unsubscribe()
	g.callRPC(nil, "admin_addPeer", enode)
	dur := 14 * time.Second
	timeout := time.After(dur)
	select {
	case ev := <-peerCh:
		g.geth.Logf("%v received event: type=%v, peer=%v", g.name, ev.Type, ev.Peer)
	case err := <-sub.Err():
		g.geth.Fatalf("%v sub error: %v", g.name, err)
	case <-timeout:
		g.geth.Error("timeout adding peer after", dur)
	}
}

// getNodeInfo lazily fetches and caches the node info. Use this function
// instead of reading g.nodeInfo directly, which may not be populated yet.
func (g *gethrpc) getNodeInfo() *p2p.NodeInfo {
	if g.nodeInfo != nil {
		return g.nodeInfo
	}
	g.nodeInfo = &p2p.NodeInfo{}
	g.callRPC(&g.nodeInfo, "admin_nodeInfo")
	return g.nodeInfo
}

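// waitSynced returns once the node reports that it is done syncing: it first
// polls eth_syncing and, if still syncing, subscribes to eth "syncing"
// notifications and waits for a completion event or a timeout.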
func (g *gethrpc) waitSynced() {
	// Check whether it is already synced
	var result interface{}
	g.callRPC(&result, "eth_syncing")
	syncing, ok := result.(bool)
	if ok && !syncing {
		g.geth.Logf("%v already synced", g.name)
		return
	}

	// Not synced yet: subscribe to syncing notifications and wait
	ch := make(chan interface{})
	sub, err := g.rpc.Subscribe(context.Background(), "eth", ch, "syncing")
	if err != nil {
		g.geth.Fatalf("%v syncing: %v", g.name, err)
	}
	defer sub.Unsubscribe()
	timeout := time.After(4 * time.Second)
	select {
	case ev := <-ch:
		g.geth.Log("'syncing' event", ev)
		syncing, ok := ev.(bool)
		if ok && !syncing {
			break
		}
		g.geth.Log("Other 'syncing' event", ev)
	case err := <-sub.Err():
		g.geth.Fatalf("%v notification: %v", g.name, err)
	case <-timeout:
		g.geth.Fatalf("%v timeout syncing", g.name)
	}
}

// ipcEndpoint resolves an IPC endpoint based on a configured value, taking into
// account the set data folders as well as the designated platform we're
// currently running on.
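//
// Illustrative examples of the resolution rules above (assuming a Unix system
// where os.TempDir() returns "/tmp"):
//
//	ipcEndpoint("geth.ipc", "/data/node1") // -> "/data/node1/geth.ipc"
//	ipcEndpoint("geth.ipc", "")            // -> "/tmp/geth.ipc"
//	ipcEndpoint("/run/geth.ipc", "x")      // -> "/run/geth.ipc" (already a path)
//
// On Windows the same bare name would resolve to `\\.\pipe\geth.ipc`.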
func ipcEndpoint(ipcPath, datadir string) string {
	// On Windows we can only use plain top-level pipes
	if runtime.GOOS == "windows" {
		if strings.HasPrefix(ipcPath, `\\.\pipe\`) {
			return ipcPath
		}
		return `\\.\pipe\` + ipcPath
	}
	// Otherwise resolve bare names into full paths inside the data directory
	if filepath.Base(ipcPath) == ipcPath {
		if datadir == "" {
			return filepath.Join(os.TempDir(), ipcPath)
		}
		return filepath.Join(datadir, ipcPath)
	}
	return ipcPath
}

// nextIPC ensures that each IPC pipe gets a unique name.
// On Linux it works fine to place IPC pipes anywhere in the filesystem (in the
// datadirs), but Windows requires pipes to live under `\\.\pipe\`. To run
// several nodes simultaneously we therefore distinguish them by pipe filename
// instead of by folder.
var nextIPC = uint32(0)

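// startGethWithIpc starts a geth node with a unique IPC endpoint, waits
// briefly for the endpoint to appear, and attaches an RPC client to it.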
func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc {
	ipcName := fmt.Sprintf("geth-%d.ipc", atomic.AddUint32(&nextIPC, 1))
	args = append([]string{"--networkid=42", "--port=0", "--nousb", "--ipcpath", ipcName}, args...)
	t.Logf("Starting %v with rpc: %v", name, args)

	g := &gethrpc{
		name: name,
		geth: runGeth(t, args...),
	}
	// Wait before we can attach to it. TODO: probe for it properly
	time.Sleep(1 * time.Second)
	var err error
	ipcpath := ipcEndpoint(ipcName, g.geth.Datadir)
	if g.rpc, err = rpc.Dial(ipcpath); err != nil {
		t.Fatalf("%v rpc connect to %v: %v", name, ipcpath, err)
	}
	return g
}

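// initGeth runs "geth init" against the bundled clique genesis in ./testdata
// and returns the freshly initialized data directory.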
func initGeth(t *testing.T) string {
	args := []string{"--nousb", "--networkid=42", "init", "./testdata/clique.json"}
	t.Logf("Initializing geth: %v", args)
	g := runGeth(t, args...)
	datadir := g.Datadir
	g.WaitExit()
	return datadir
}

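// startLightServer initializes a data directory, imports the test signer key,
// and starts a mining light server (--light.serve) that accepts at most one
// light peer (--light.maxpeers=1).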
func startLightServer(t *testing.T) *gethrpc {
	datadir := initGeth(t)
	t.Logf("Importing keys to geth")
	runGeth(t, "--nousb", "--datadir", datadir, "--password", "./testdata/password.txt", "account", "import", "./testdata/key.prv", "--lightkdf").WaitExit()
	account := "0x02f0d131f1f97aef08aec6e3291b957d9efe7105"
	server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1", "--verbosity=4")
	return server
}

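// startClient starts a light-sync client node with the given name on a fresh
// data directory.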
func startClient(t *testing.T, name string) *gethrpc {
	datadir := initGeth(t)
	return startGethWithIpc(t, name, "--datadir", datadir, "--nodiscover", "--syncmode=light", "--nat=extip:127.0.0.1", "--verbosity=4")
}

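// TestPriorityClient checks that a client whose token balance has been raised
// via les_addBalance displaces the free client on a light server that has only
// a single light-peer slot.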
func TestPriorityClient(t *testing.T) {
	// Quorum
	t.Skip("skipping test in Quorum (no support for light sync mode).")
	// End Quorum

	lightServer := startLightServer(t)
	defer lightServer.killAndWait()

	// Start client and add lightServer as peer
	freeCli := startClient(t, "freeCli")
	defer freeCli.killAndWait()
	freeCli.addPeer(lightServer)

	var peers []*p2p.PeerInfo
	freeCli.callRPC(&peers, "admin_peers")
	if len(peers) != 1 {
		t.Errorf("Expected: # of client peers == 1, actual: %v", len(peers))
		return
	}

	// Set up the priority client, get its nodeID, increase its balance on the lightServer
	prioCli := startClient(t, "prioCli")
	defer prioCli.killAndWait()
	tokens := uint64(3_000_000_000)
	lightServer.callRPC(nil, "les_addBalance", prioCli.getNodeInfo().ID, tokens)
	prioCli.addPeer(lightServer)

	// Check that the priority client is actually syncing and the regular client got kicked out
	prioCli.callRPC(&peers, "admin_peers")
	if len(peers) != 1 {
		t.Errorf("Expected: # of prio peers == 1, actual: %v", len(peers))
	}

	nodes := map[string]*gethrpc{
		lightServer.getNodeInfo().ID: lightServer,
		freeCli.getNodeInfo().ID:     freeCli,
		prioCli.getNodeInfo().ID:     prioCli,
	}
	time.Sleep(1 * time.Second)
	lightServer.callRPC(&peers, "admin_peers")
	peersWithNames := make(map[string]string)
	for _, p := range peers {
		peersWithNames[nodes[p.ID].name] = p.ID
	}
	if _, freeClientFound := peersWithNames[freeCli.name]; freeClientFound {
		t.Error("client is still a peer of lightServer", peersWithNames)
	}
	if _, prioClientFound := peersWithNames[prioCli.name]; !prioClientFound {
		t.Error("prio client is not among lightServer peers", peersWithNames)
	}
}