github.com/tdcblockchain/tdcblockchain@v0.0.0-20191111034745-805c65ade158/les/api_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"context"
	"errors"
	"flag"
	"io/ioutil"
	"math/rand"
	"os"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/les/flowcontrol"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/mattn/go-colorable"
)

/*
This test is not meant to be a part of the automatic testing process because it
runs for a long time and also requires a large database in order to do a meaningful
request performance test. When testServerDataDir is empty, the test is skipped.
*/

const (
	testServerDataDir  = "" // should always be empty on the master branch
	testServerCapacity = 200
	testMaxClients     = 10
	testTolerance      = 0.1
	minRelCap          = 0.2
)

func TestCapacityAPI3(t *testing.T) {
	testCapacityAPI(t, 3)
}

func TestCapacityAPI6(t *testing.T) {
	testCapacityAPI(t, 6)
}

func TestCapacityAPI10(t *testing.T) {
	testCapacityAPI(t, 10)
}

// testCapacityAPI runs an end-to-end simulation test connecting one server with
// a given number of clients. It sets different priority capacities to all clients
// except a randomly selected one which runs in free client mode. All clients send
// similar requests at the maximum allowed rate and the test verifies whether the
// ratio of processed requests is close enough to the ratio of assigned capacities.
// Running multiple rounds with different settings ensures that changing capacity
// while connected and going back and forth between free and priority mode with
// the supplied API calls is also thoroughly tested.
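// A measurement round counts as successful once the per-client request counts,
// normalized by their relative capacity weights, deviate from their average by
// no more than testTolerance.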
func testCapacityAPI(t *testing.T, clientCount int) {
	// Skip test if no data dir specified
	if testServerDataDir == "" {
		return
	}
	for !testSim(t, 1, clientCount, []string{testServerDataDir}, nil, func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool {
		if len(servers) != 1 {
			t.Fatalf("Invalid number of servers: %d", len(servers))
		}
		server := servers[0]

		serverRpcClient, err := server.Client()
		if err != nil {
			t.Fatalf("Failed to obtain rpc client: %v", err)
		}
		headNum, headHash := getHead(ctx, t, serverRpcClient)
		minCap, freeCap, totalCap := getCapacityInfo(ctx, t, serverRpcClient)
		testCap := totalCap * 3 / 4
		t.Logf("Server testCap: %d minCap: %d head number: %d head hash: %064x\n", testCap, minCap, headNum, headHash)
		reqMinCap := uint64(float64(testCap) * minRelCap / (minRelCap + float64(len(clients)-1)))
		if minCap > reqMinCap {
			t.Fatalf("Minimum client capacity (%d) bigger than required minimum for this test (%d)", minCap, reqMinCap)
		}
		freeIdx := rand.Intn(len(clients))

		clientRpcClients := make([]*rpc.Client, len(clients))
		for i, client := range clients {
			var err error
			clientRpcClients[i], err = client.Client()
			if err != nil {
				t.Fatalf("Failed to obtain rpc client: %v", err)
			}
			t.Log("connecting client", i)
			if i != freeIdx {
				setCapacity(ctx, t, serverRpcClient, client.ID(), testCap/uint64(len(clients)))
			}
			net.Connect(client.ID(), server.ID())

			for {
				select {
				case <-ctx.Done():
					t.Fatalf("Timeout")
				default:
				}
				num, hash := getHead(ctx, t, clientRpcClients[i])
				if num == headNum && hash == headHash {
					t.Log("client", i, "synced")
					break
				}
				time.Sleep(time.Millisecond * 200)
			}
		}

		var wg sync.WaitGroup
		stop := make(chan struct{})

		reqCount := make([]uint64, len(clientRpcClients))

		// Send light request like crazy.
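		// One goroutine per client floods the server with requests. The buffered
		// "queue" channel acts as a semaphore capping the number of in-flight
		// requests per client at 100; successful requests are counted atomically
		// in reqCount, and every 10000th success triggers debug_freezeClient on
		// the server to also exercise the freeze/recovery path while under load.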
		for i, c := range clientRpcClients {
			wg.Add(1)
			i, c := i, c
			go func() {
				defer wg.Done()

				queue := make(chan struct{}, 100)
				atomic.StoreUint64(&reqCount[i], 0)
				for {
					select {
					case queue <- struct{}{}:
						select {
						case <-stop:
							return
						case <-ctx.Done():
							return
						default:
							wg.Add(1)
							go func() {
								ok := testRequest(ctx, t, c)
								wg.Done()
								<-queue
								if ok {
									count := atomic.AddUint64(&reqCount[i], 1)
									if count%10000 == 0 {
										freezeClient(ctx, t, serverRpcClient, clients[i].ID())
									}
								}
							}()
						}
					case <-stop:
						return
					case <-ctx.Done():
						return
					}
				}
			}()
		}

		processedSince := func(start []uint64) []uint64 {
			res := make([]uint64, len(reqCount))
			for i := range reqCount {
				res[i] = atomic.LoadUint64(&reqCount[i])
				if start != nil {
					res[i] -= start[i]
				}
			}
			return res
		}

		weights := make([]float64, len(clients))
		for c := 0; c < 5; c++ {
			setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), freeCap)
			freeIdx = rand.Intn(len(clients))
			var sum float64
			for i := range clients {
				if i == freeIdx {
					weights[i] = 0
				} else {
					weights[i] = rand.Float64()*(1-minRelCap) + minRelCap
				}
				sum += weights[i]
			}
			for i, client := range clients {
				weights[i] *= float64(testCap-freeCap-100) / sum
				capacity := uint64(weights[i])
				if i != freeIdx && capacity < getCapacity(ctx, t, serverRpcClient, client.ID()) {
					setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
				}
			}
			setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), 0)
			for i, client := range clients {
				capacity := uint64(weights[i])
				if i != freeIdx && capacity > getCapacity(ctx, t, serverRpcClient, client.ID()) {
					setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
				}
			}
			weights[freeIdx] = float64(freeCap)
			for i := range clients {
				weights[i] /= float64(testCap)
			}

			time.Sleep(flowcontrol.DecParamDelay)
			t.Log("Starting measurement")
			t.Logf("Relative weights:")
			for i := range clients {
				t.Logf(" %f", weights[i])
			}
			t.Log()
			start := processedSince(nil)
			for {
				select {
				case <-ctx.Done():
					t.Fatalf("Timeout")
				default:
				}

				_, _, totalCap = getCapacityInfo(ctx, t, serverRpcClient)
				if totalCap < testCap {
					t.Log("Total capacity underrun")
					close(stop)
					wg.Wait()
					return false
				}

				processed := processedSince(start)
				var avg uint64
				t.Logf("Processed")
				for i, p := range processed {
					t.Logf(" %d", p)
					processed[i] = uint64(float64(p) / weights[i])
					avg += processed[i]
				}
				avg /= uint64(len(processed))

				if avg >= 10000 {
					var maxDev float64
					for _, p := range processed {
						dev := float64(int64(p-avg)) / float64(avg)
						t.Logf(" %7.4f", dev)
						if dev < 0 {
							dev = -dev
						}
						if dev > maxDev {
							maxDev = dev
						}
					}
					t.Logf(" max deviation: %f totalCap: %d\n", maxDev, totalCap)
					if maxDev <= testTolerance {
						t.Log("success")
						break
					}
				} else {
					t.Log()
				}
				time.Sleep(time.Millisecond * 200)
			}
		}

		close(stop)
		wg.Wait()

		for i, count := range reqCount {
			t.Log("client", i, "processed", count)
		}
		return true
	}) {
		t.Log("restarting test")
	}
}

func getHead(ctx context.Context, t *testing.T, client *rpc.Client) (uint64, common.Hash) {
	res := make(map[string]interface{})
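	// Fetch the latest header as a generic JSON map via eth_getBlockByNumber;
	// the number and hash fields are decoded by hand below.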
	if err := client.CallContext(ctx, &res, "eth_getBlockByNumber", "latest", false); err != nil {
		t.Fatalf("Failed to obtain head block: %v", err)
	}
	numStr, ok := res["number"].(string)
	if !ok {
		t.Fatalf("RPC block number field invalid")
	}
	num, err := hexutil.DecodeUint64(numStr)
	if err != nil {
		t.Fatalf("Failed to decode RPC block number: %v", err)
	}
	hashStr, ok := res["hash"].(string)
	if !ok {
		t.Fatalf("RPC block hash field invalid")
	}
	hash := common.HexToHash(hashStr)
	return num, hash
}

func testRequest(ctx context.Context, t *testing.T, client *rpc.Client) bool {
	var res string
	var addr common.Address
	rand.Read(addr[:])
	c, cancel := context.WithTimeout(ctx, time.Second*12)
	defer cancel()
	err := client.CallContext(c, &res, "eth_getBalance", addr, "latest")
	if err != nil {
		t.Log("request error:", err)
	}
	return err == nil
}

func freezeClient(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) {
	if err := server.CallContext(ctx, nil, "debug_freezeClient", clientID); err != nil {
		t.Fatalf("Failed to freeze client: %v", err)
	}
}

func setCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID, cap uint64) {
	params := make(map[string]interface{})
	params["capacity"] = cap
	if err := server.CallContext(ctx, nil, "les_setClientParams", []enode.ID{clientID}, []string{}, params); err != nil {
		t.Fatalf("Failed to set client capacity: %v", err)
	}
}

func getCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) uint64 {
	var res map[enode.ID]map[string]interface{}
	if err := server.CallContext(ctx, &res, "les_clientInfo", []enode.ID{clientID}, []string{}); err != nil {
		t.Fatalf("Failed to get client info: %v", err)
	}
	info, ok := res[clientID]
	if !ok {
		t.Fatalf("Missing client info")
	}
	v, ok := info["capacity"]
	if !ok {
		t.Fatalf("Missing field in client info: capacity")
	}
	vv, ok := v.(float64)
	if !ok {
		t.Fatalf("Failed to decode capacity field")
	}
	return uint64(vv)
}

func getCapacityInfo(ctx context.Context, t *testing.T, server *rpc.Client) (minCap, freeCap, totalCap uint64) {
	var res map[string]interface{}
	if err := server.CallContext(ctx, &res, "les_serverInfo"); err != nil {
		t.Fatalf("Failed to query server info: %v", err)
	}
	decode := func(s string) uint64 {
		v, ok := res[s]
		if !ok {
			t.Fatalf("Missing field in server info: %s", s)
		}
		vv, ok := v.(float64)
		if !ok {
			t.Fatalf("Failed to decode server info field: %s", s)
		}
		return uint64(vv)
	}
	minCap = decode("minimumCapacity")
	freeCap = decode("freeClientCapacity")
	totalCap = decode("totalCapacity")
	return
}

func init() {
	flag.Parse()
	// register the Delivery service which will run as a devp2p
	// protocol when using the exec adapter
	adapters.RegisterServices(services)

	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}

var (
	adapter  = flag.String("adapter", "exec", "type of simulation: sim|socket|exec|docker")
	loglevel = flag.Int("loglevel", 0, "verbosity of logs")
	nodes    = flag.Int("nodes", 0, "number of nodes")
)

var services = adapters.Services{
	"lesclient": newLesClientService,
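	// "lesserver" boots a full-sync eth node with LightServ enabled so it can
	// serve the light clients created by "lesclient" (see the constructors below).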
"lesserver": newLesServerService, 399 } 400 401 func NewNetwork() (*simulations.Network, func(), error) { 402 adapter, adapterTeardown, err := NewAdapter(*adapter, services) 403 if err != nil { 404 return nil, adapterTeardown, err 405 } 406 defaultService := "streamer" 407 net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{ 408 ID: "0", 409 DefaultService: defaultService, 410 }) 411 teardown := func() { 412 adapterTeardown() 413 net.Shutdown() 414 } 415 return net, teardown, nil 416 } 417 418 func NewAdapter(adapterType string, services adapters.Services) (adapter adapters.NodeAdapter, teardown func(), err error) { 419 teardown = func() {} 420 switch adapterType { 421 case "sim": 422 adapter = adapters.NewSimAdapter(services) 423 // case "socket": 424 // adapter = adapters.NewSocketAdapter(services) 425 case "exec": 426 baseDir, err0 := ioutil.TempDir("", "les-test") 427 if err0 != nil { 428 return nil, teardown, err0 429 } 430 teardown = func() { os.RemoveAll(baseDir) } 431 adapter = adapters.NewExecAdapter(baseDir) 432 /*case "docker": 433 adapter, err = adapters.NewDockerAdapter() 434 if err != nil { 435 return nil, teardown, err 436 }*/ 437 default: 438 return nil, teardown, errors.New("adapter needs to be one of sim, socket, exec, docker") 439 } 440 return adapter, teardown, nil 441 } 442 443 func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []string, test func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool) bool { 444 net, teardown, err := NewNetwork() 445 defer teardown() 446 if err != nil { 447 t.Fatalf("Failed to create network: %v", err) 448 } 449 timeout := 1800 * time.Second 450 ctx, cancel := context.WithTimeout(context.Background(), timeout) 451 defer cancel() 452 453 servers := make([]*simulations.Node, serverCount) 454 clients := make([]*simulations.Node, clientCount) 455 456 for i := range clients { 457 clientconf := adapters.RandomNodeConfig() 458 clientconf.Services = []string{"lesclient"} 459 if len(clientDir) == clientCount { 460 clientconf.DataDir = clientDir[i] 461 } 462 client, err := net.NewNodeWithConfig(clientconf) 463 if err != nil { 464 t.Fatalf("Failed to create client: %v", err) 465 } 466 clients[i] = client 467 } 468 469 for i := range servers { 470 serverconf := adapters.RandomNodeConfig() 471 serverconf.Services = []string{"lesserver"} 472 if len(serverDir) == serverCount { 473 serverconf.DataDir = serverDir[i] 474 } 475 server, err := net.NewNodeWithConfig(serverconf) 476 if err != nil { 477 t.Fatalf("Failed to create server: %v", err) 478 } 479 servers[i] = server 480 } 481 482 for _, client := range clients { 483 if err := net.Start(client.ID()); err != nil { 484 t.Fatalf("Failed to start client node: %v", err) 485 } 486 } 487 for _, server := range servers { 488 if err := net.Start(server.ID()); err != nil { 489 t.Fatalf("Failed to start server node: %v", err) 490 } 491 } 492 493 return test(ctx, net, servers, clients) 494 } 495 496 func newLesClientService(ctx *adapters.ServiceContext) (node.Service, error) { 497 config := eth.DefaultConfig 498 config.SyncMode = downloader.LightSync 499 config.Ethash.PowMode = ethash.ModeFake 500 return New(ctx.NodeContext, &config) 501 } 502 503 func newLesServerService(ctx *adapters.ServiceContext) (node.Service, error) { 504 config := eth.DefaultConfig 505 config.SyncMode = downloader.FullSync 506 config.LightServ = testServerCapacity 507 config.LightPeers = testMaxClients 508 ethereum, err := 
	ethereum, err := eth.New(ctx.NodeContext, &config)
	if err != nil {
		return nil, err
	}
	server, err := NewLesServer(ethereum, &config)
	if err != nil {
		return nil, err
	}
	ethereum.AddLesServer(server)
	return ethereum, nil
}