github.com/bearnetworkchain/go-bearnetwork@v1.10.19-0.20220604150648-d63890c2e42b/cmd/geth/les_test.go

// Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/bearnetworkchain/go-bearnetwork/p2p"
	"github.com/bearnetworkchain/go-bearnetwork/rpc"
)

// gethrpc couples a running geth test process with an RPC client connected
// to its IPC endpoint.
type gethrpc struct {
	name     string
	rpc      *rpc.Client
	geth     *testgeth
	nodeInfo *p2p.NodeInfo
}

// killAndWait terminates the geth process and blocks until it has exited.
func (g *gethrpc) killAndWait() {
	g.geth.Kill()
	g.geth.WaitExit()
}

// callRPC invokes the given RPC method and fails the test on error.
func (g *gethrpc) callRPC(result interface{}, method string, args ...interface{}) {
	if err := g.rpc.Call(&result, method, args...); err != nil {
		g.geth.Fatalf("callRPC %v: %v", method, err)
	}
}

// addPeer connects g to the given peer and waits for a peer event (or a
// timeout) to confirm the connection attempt was processed.
func (g *gethrpc) addPeer(peer *gethrpc) {
	g.geth.Logf("%v.addPeer(%v)", g.name, peer.name)
	enode := peer.getNodeInfo().Enode
	peerCh := make(chan *p2p.PeerEvent)
	sub, err := g.rpc.Subscribe(context.Background(), "admin", peerCh, "peerEvents")
	if err != nil {
		g.geth.Fatalf("subscribe %v: %v", g.name, err)
	}
	defer sub.Unsubscribe()
	g.callRPC(nil, "admin_addPeer", enode)
	dur := 14 * time.Second
	timeout := time.After(dur)
	select {
	case ev := <-peerCh:
		g.geth.Logf("%v received event: type=%v, peer=%v", g.name, ev.Type, ev.Peer)
	case err := <-sub.Err():
		g.geth.Fatalf("%v sub error: %v", g.name, err)
	case <-timeout:
		g.geth.Error("timeout adding peer after", dur)
	}
}

// getNodeInfo returns the node info, fetching and caching it over RPC on
// first use. Use this function instead of reading g.nodeInfo directly.
func (g *gethrpc) getNodeInfo() *p2p.NodeInfo {
	if g.nodeInfo != nil {
		return g.nodeInfo
	}
	g.nodeInfo = &p2p.NodeInfo{}
	g.callRPC(&g.nodeInfo, "admin_nodeInfo")
	return g.nodeInfo
}

// waitSynced returns once the node reports that it is no longer syncing,
// either immediately or after an eth "syncing" notification arrives.
func (g *gethrpc) waitSynced() {
	// Check if it's synced now
	var result interface{}
	g.callRPC(&result, "eth_syncing")
	syncing, ok := result.(bool)
	if ok && !syncing {
		g.geth.Logf("%v already synced", g.name)
		return
	}

	// Actually wait, subscribe to the event
	ch := make(chan interface{})
	sub, err := g.rpc.Subscribe(context.Background(), "eth", ch, "syncing")
	if err != nil {
		g.geth.Fatalf("%v syncing: %v", g.name, err)
	}
	defer sub.Unsubscribe()
	timeout := time.After(4 * time.Second)
	select {
	case ev := <-ch:
		g.geth.Log("'syncing' event", ev)
		syncing, ok := ev.(bool)
		if ok && !syncing {
			break
		}
		g.geth.Log("Other 'syncing' event", ev)
	case err := <-sub.Err():
		g.geth.Fatalf("%v notification: %v", g.name, err)
	case <-timeout:
		g.geth.Fatalf("%v timeout syncing", g.name)
	}
}
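// A minimal usage sketch of the helpers above; "client" is a hypothetical
// node name, and startLightServer/startClient are defined further down:
//
//	server := startLightServer(t)
//	defer server.killAndWait()
//	client := startClient(t, "client")
//	defer client.killAndWait()
//	client.addPeer(server)
//	client.waitSynced()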
// ipcEndpoint resolves an IPC endpoint based on a configured value, taking
// into account the set data folders as well as the designated platform we're
// currently running on.
func ipcEndpoint(ipcPath, datadir string) string {
	// On windows we can only use plain top-level pipes
	if runtime.GOOS == "windows" {
		if strings.HasPrefix(ipcPath, `\\.\pipe\`) {
			return ipcPath
		}
		return `\\.\pipe\` + ipcPath
	}
	// Resolve names into the data directory full paths otherwise
	if filepath.Base(ipcPath) == ipcPath {
		if datadir == "" {
			return filepath.Join(os.TempDir(), ipcPath)
		}
		return filepath.Join(datadir, ipcPath)
	}
	return ipcPath
}
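// For illustration (hypothetical arguments): ipcEndpoint("geth-1.ipc", "/tmp/dd")
// resolves to "/tmp/dd/geth-1.ipc" on unix-like systems, while on Windows any
// bare name is forced into the pipe namespace, yielding `\\.\pipe\geth-1.ipc`.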
// nextIPC ensures that each ipc pipe gets a unique name.
// On Linux, it works well to use ipc pipes all over the filesystem (in datadirs),
// but Windows requires pipes to sit in "\\.\pipe\". Therefore, to run several
// nodes simultaneously, we need to distinguish between them, which we do by
// the pipe filename instead of folder.
var nextIPC = uint32(0)

// startGethWithIpc runs geth with a unique IPC pipe name and dials its RPC
// endpoint, retrying for a few seconds while the node starts up.
func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc {
	ipcName := fmt.Sprintf("geth-%d.ipc", atomic.AddUint32(&nextIPC, 1))
	args = append([]string{"--networkid=42", "--port=0", "--ipcpath", ipcName}, args...)
	t.Logf("Starting %v with rpc: %v", name, args)

	g := &gethrpc{
		name: name,
		geth: runGeth(t, args...),
	}
	ipcpath := ipcEndpoint(ipcName, g.geth.Datadir)
	// We can't know exactly how long geth will take to start, so we try 10
	// times over a 5 second period.
	var err error
	for i := 0; i < 10; i++ {
		time.Sleep(500 * time.Millisecond)
		if g.rpc, err = rpc.Dial(ipcpath); err == nil {
			return g
		}
	}
	t.Fatalf("%v rpc connect to %v: %v", name, ipcpath, err)
	return nil
}

// initGeth initializes a fresh data directory with the clique test genesis
// and returns its path.
func initGeth(t *testing.T) string {
	args := []string{"--networkid=42", "init", "./testdata/clique.json"}
	t.Logf("Initializing geth: %v", args)
	g := runGeth(t, args...)
	datadir := g.Datadir
	g.WaitExit()
	return datadir
}

// startLightServer imports the test key into a fresh datadir and starts a
// mining light server with a single available light slot.
func startLightServer(t *testing.T) *gethrpc {
	datadir := initGeth(t)
	t.Logf("Importing keys to geth")
	runGeth(t, "--datadir", datadir, "--password", "./testdata/password.txt", "account", "import", "./testdata/key.prv", "--lightkdf").WaitExit()
	account := "0x02f0d131f1f97aef08aec6e3291b957d9efe7105"
	server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1", "--verbosity=4")
	return server
}

// startClient starts a light client in its own data directory.
func startClient(t *testing.T, name string) *gethrpc {
	datadir := initGeth(t)
	return startGethWithIpc(t, name, "--datadir", datadir, "--nodiscover", "--syncmode=light", "--nat=extip:127.0.0.1", "--verbosity=4")
}

// TestPriorityClient checks that a light client whose les balance has been
// raised displaces a free client on a server with a single light slot.
func TestPriorityClient(t *testing.T) {
	lightServer := startLightServer(t)
	defer lightServer.killAndWait()

	// Start client and add lightServer as peer
	freeCli := startClient(t, "freeCli")
	defer freeCli.killAndWait()
	freeCli.addPeer(lightServer)

	var peers []*p2p.PeerInfo
	freeCli.callRPC(&peers, "admin_peers")
	if len(peers) != 1 {
		t.Errorf("Expected: # of client peers == 1, actual: %v", len(peers))
		return
	}

	// Set up priority client, get its nodeID, increase its balance on the lightServer
	prioCli := startClient(t, "prioCli")
	defer prioCli.killAndWait()
	// 3_000_000_000 once we move to Go 1.13
	tokens := uint64(3000000000)
	lightServer.callRPC(nil, "les_addBalance", prioCli.getNodeInfo().ID, tokens)
	prioCli.addPeer(lightServer)

	// Check if priority client is actually syncing and the regular client got kicked out
	prioCli.callRPC(&peers, "admin_peers")
	if len(peers) != 1 {
		t.Errorf("Expected: # of prio peers == 1, actual: %v", len(peers))
	}

	nodes := map[string]*gethrpc{
		lightServer.getNodeInfo().ID: lightServer,
		freeCli.getNodeInfo().ID:     freeCli,
		prioCli.getNodeInfo().ID:     prioCli,
	}
	time.Sleep(1 * time.Second)
	lightServer.callRPC(&peers, "admin_peers")
	peersWithNames := make(map[string]string)
	for _, p := range peers {
		peersWithNames[nodes[p.ID].name] = p.ID
	}
	if _, freeClientFound := peersWithNames[freeCli.name]; freeClientFound {
		t.Error("client is still a peer of lightServer", peersWithNames)
	}
	if _, prioClientFound := peersWithNames[prioCli.name]; !prioClientFound {
		t.Error("prio client is not among lightServer peers", peersWithNames)
	}
}
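// With the repository checked out, the test above can be run on its own, e.g.:
//
//	go test -run TestPriorityClient ./cmd/geth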