github.com/shrimpyuk/bor@v0.2.15-0.20220224151350-fb4ec6020bae/cmd/geth/les_test.go

package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
)

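// gethrpc wraps a running geth test process together with an RPC client
// connected to it, plus a cached copy of the node's p2p info.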
type gethrpc struct {
	name     string
	rpc      *rpc.Client
	geth     *testgeth
	nodeInfo *p2p.NodeInfo
}

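// killAndWait terminates the geth process and blocks until it has exited.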
func (g *gethrpc) killAndWait() {
	g.geth.Kill()
	g.geth.WaitExit()
}

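// callRPC invokes the given RPC method on the node and unmarshals the reply
// into result, failing the test on error. Typical use:
//
//	var peers []*p2p.PeerInfo
//	g.callRPC(&peers, "admin_peers")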
func (g *gethrpc) callRPC(result interface{}, method string, args ...interface{}) {
	if err := g.rpc.Call(&result, method, args...); err != nil {
		g.geth.Fatalf("callRPC %v: %v", method, err)
	}
}

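// addPeer subscribes to admin peerEvents, issues admin_addPeer with the
// peer's enode URL, and waits up to 14 seconds for an event confirming the
// connection attempt.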
func (g *gethrpc) addPeer(peer *gethrpc) {
	g.geth.Logf("%v.addPeer(%v)", g.name, peer.name)
	enode := peer.getNodeInfo().Enode
	peerCh := make(chan *p2p.PeerEvent)
	sub, err := g.rpc.Subscribe(context.Background(), "admin", peerCh, "peerEvents")
	if err != nil {
		g.geth.Fatalf("subscribe %v: %v", g.name, err)
	}
	defer sub.Unsubscribe()
	g.callRPC(nil, "admin_addPeer", enode)
	dur := 14 * time.Second
	timeout := time.After(dur)
	select {
	case ev := <-peerCh:
		g.geth.Logf("%v received event: type=%v, peer=%v", g.name, ev.Type, ev.Peer)
	case err := <-sub.Err():
		g.geth.Fatalf("%v sub error: %v", g.name, err)
	case <-timeout:
		g.geth.Error("timeout adding peer after", dur)
	}
}

// getNodeInfo fetches the node's info via admin_nodeInfo on first use and
// caches it; use this instead of reading g.nodeInfo directly.
func (g *gethrpc) getNodeInfo() *p2p.NodeInfo {
	if g.nodeInfo != nil {
		return g.nodeInfo
	}
	g.nodeInfo = &p2p.NodeInfo{}
	g.callRPC(&g.nodeInfo, "admin_nodeInfo")
	return g.nodeInfo
}

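// waitSynced returns once the node reports that it is done syncing, either
// immediately via eth_syncing or by waiting up to 4 seconds for an eth
// "syncing" subscription event; it fails the test on timeout.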
func (g *gethrpc) waitSynced() {
	// Check if it's synced now
	var result interface{}
	g.callRPC(&result, "eth_syncing")
	syncing, ok := result.(bool)
	if ok && !syncing {
		g.geth.Logf("%v already synced", g.name)
		return
	}

	// Not synced yet; subscribe and wait for the event
	ch := make(chan interface{})
	sub, err := g.rpc.Subscribe(context.Background(), "eth", ch, "syncing")
	if err != nil {
		g.geth.Fatalf("%v syncing: %v", g.name, err)
	}
	defer sub.Unsubscribe()
	timeout := time.After(4 * time.Second)
	select {
	case ev := <-ch:
		g.geth.Log("'syncing' event", ev)
		syncing, ok := ev.(bool)
		if ok && !syncing {
			break
		}
		g.geth.Log("Other 'syncing' event", ev)
	case err := <-sub.Err():
		g.geth.Fatalf("%v notification: %v", g.name, err)
	case <-timeout:
		g.geth.Fatalf("%v timeout syncing", g.name)
	}
}

// ipcEndpoint resolves an IPC endpoint based on a configured value, taking into
// account the set data folders as well as the designated platform we're currently
// running on.
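// For example, ipcEndpoint("geth.ipc", "/data") yields "/data/geth.ipc" on
// Linux, whereas on Windows any plain name is mapped beneath `\\.\pipe\`,
// giving `\\.\pipe\geth.ipc`.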
func ipcEndpoint(ipcPath, datadir string) string {
	// On Windows we can only use plain top-level pipes
	if runtime.GOOS == "windows" {
		if strings.HasPrefix(ipcPath, `\\.\pipe\`) {
			return ipcPath
		}
		return `\\.\pipe\` + ipcPath
	}
	// Resolve names into the data directory full paths otherwise
	if filepath.Base(ipcPath) == ipcPath {
		if datadir == "" {
			return filepath.Join(os.TempDir(), ipcPath)
		}
		return filepath.Join(datadir, ipcPath)
	}
	return ipcPath
}

// nextIPC ensures that each IPC pipe gets a unique name.
// On Linux it works well to use IPC pipes anywhere on the filesystem (in
// datadirs), but Windows requires pipes to sit in `\\.\pipe\`. Therefore, to
// run several nodes simultaneously, we distinguish them by pipe filename
// rather than by folder.
var nextIPC = uint32(0)

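// startGethWithIpc starts a geth node on a uniquely named IPC endpoint with
// the given extra arguments and returns a gethrpc wrapper once the endpoint
// accepts connections, retrying for up to five seconds before failing the test.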
func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc {
	ipcName := fmt.Sprintf("geth-%d.ipc", atomic.AddUint32(&nextIPC, 1))
	args = append([]string{"--networkid=42", "--port=0", "--ipcpath", ipcName}, args...)
	t.Logf("Starting %v with rpc: %v", name, args)

	g := &gethrpc{
		name: name,
		geth: runGeth(t, args...),
	}
	ipcpath := ipcEndpoint(ipcName, g.geth.Datadir)
	// We can't know exactly how long geth will take to start, so we try 10
	// times over a 5 second period.
	var err error
	for i := 0; i < 10; i++ {
		time.Sleep(500 * time.Millisecond)
		if g.rpc, err = rpc.Dial(ipcpath); err == nil {
			return g
		}
	}
	t.Fatalf("%v rpc connect to %v: %v", name, ipcpath, err)
	return nil
}

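// initGeth runs `geth init` against the clique genesis in ./testdata and
// returns the freshly initialized data directory.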
func initGeth(t *testing.T) string {
	args := []string{"--networkid=42", "init", "./testdata/clique.json"}
	t.Logf("Initializing geth: %v", args)
	g := runGeth(t, args...)
	datadir := g.Datadir
	g.WaitExit()
	return datadir
}

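// startLightServer initializes a data directory, imports the test signer key,
// and starts a mining LES server that serves at most one light peer.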
func startLightServer(t *testing.T) *gethrpc {
	datadir := initGeth(t)
	t.Logf("Importing keys to geth")
	runGeth(t, "--datadir", datadir, "--password", "./testdata/password.txt", "account", "import", "./testdata/key.prv", "--lightkdf").WaitExit()
	account := "0x02f0d131f1f97aef08aec6e3291b957d9efe7105"
	server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1", "--verbosity=4")
	return server
}

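// startClient starts a light client in its own fresh data directory.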
func startClient(t *testing.T, name string) *gethrpc {
	datadir := initGeth(t)
	return startGethWithIpc(t, name, "--datadir", datadir, "--nodiscover", "--syncmode=light", "--nat=extip:127.0.0.1", "--verbosity=4")
}

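// TestPriorityClient starts a light server with room for a single light peer,
// connects a free client, then grants a second client a positive token balance
// via les_addBalance and verifies that the priority client displaces the free
// one.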
func TestPriorityClient(t *testing.T) {
	lightServer := startLightServer(t)
	defer lightServer.killAndWait()

	// Start client and add lightServer as peer
	freeCli := startClient(t, "freeCli")
	defer freeCli.killAndWait()
	freeCli.addPeer(lightServer)

	var peers []*p2p.PeerInfo
	freeCli.callRPC(&peers, "admin_peers")
	if len(peers) != 1 {
		t.Errorf("Expected: # of client peers == 1, actual: %v", len(peers))
		return
	}

	// Set up the priority client, get its nodeID, and increase its balance on the lightServer
	prioCli := startClient(t, "prioCli")
	defer prioCli.killAndWait()
	tokens := uint64(3_000_000_000)
	lightServer.callRPC(nil, "les_addBalance", prioCli.getNodeInfo().ID, tokens)
	prioCli.addPeer(lightServer)

	// Check that the priority client is actually syncing and that the regular client got kicked out
	prioCli.callRPC(&peers, "admin_peers")
	if len(peers) != 1 {
		t.Errorf("Expected: # of prio peers == 1, actual: %v", len(peers))
	}

	nodes := map[string]*gethrpc{
		lightServer.getNodeInfo().ID: lightServer,
		freeCli.getNodeInfo().ID:     freeCli,
		prioCli.getNodeInfo().ID:     prioCli,
	}
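	// Give the server a moment to drop the free client in favour of the
	// priority one before inspecting its peer list.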
	time.Sleep(1 * time.Second)
	lightServer.callRPC(&peers, "admin_peers")
	peersWithNames := make(map[string]string)
	for _, p := range peers {
		peersWithNames[nodes[p.ID].name] = p.ID
	}
	if _, freeClientFound := peersWithNames[freeCli.name]; freeClientFound {
		t.Error("free client is still a peer of lightServer", peersWithNames)
	}
	if _, prioClientFound := peersWithNames[prioCli.name]; !prioClientFound {
		t.Error("prio client is not among lightServer peers", peersWithNames)
	}
}