gitee.com/liu-zhao234568/cntest@v1.0.0/les/api_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"context"
	"errors"
	"flag"
	"io/ioutil"
	"math/rand"
	"os"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"gitee.com/liu-zhao234568/cntest/common"
	"gitee.com/liu-zhao234568/cntest/common/hexutil"
	"gitee.com/liu-zhao234568/cntest/consensus/ethash"
	"gitee.com/liu-zhao234568/cntest/eth"
	"gitee.com/liu-zhao234568/cntest/eth/downloader"
	"gitee.com/liu-zhao234568/cntest/eth/ethconfig"
	"gitee.com/liu-zhao234568/cntest/les/flowcontrol"
	"gitee.com/liu-zhao234568/cntest/log"
	"gitee.com/liu-zhao234568/cntest/node"
	"gitee.com/liu-zhao234568/cntest/p2p/enode"
	"gitee.com/liu-zhao234568/cntest/p2p/simulations"
	"gitee.com/liu-zhao234568/cntest/p2p/simulations/adapters"
	"gitee.com/liu-zhao234568/cntest/rpc"
	"github.com/mattn/go-colorable"
)

// Additional command line flags for the test binary.
var (
	loglevel   = flag.Int("loglevel", 0, "verbosity of logs")
	simAdapter = flag.String("adapter", "exec", "type of simulation: sim|socket|exec|docker")
)

func TestMain(m *testing.M) {
	flag.Parse()
	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
	// register the LES client and server services which will run as devp2p
	// protocols when using the exec adapter
	adapters.RegisterLifecycles(services)
	os.Exit(m.Run())
}

// This test is not meant to be a part of the automatic testing process because it
// runs for a long time and also requires a large database in order to do a meaningful
// request performance test. When testServerDataDir is empty, the test is skipped.

const (
	testServerDataDir  = "" // should always be empty on the master branch
	testServerCapacity = 200
	testMaxClients     = 10
	testTolerance      = 0.1
	minRelCap          = 0.2
)

func TestCapacityAPI3(t *testing.T) {
	testCapacityAPI(t, 3)
}

func TestCapacityAPI6(t *testing.T) {
	testCapacityAPI(t, 6)
}

func TestCapacityAPI10(t *testing.T) {
	testCapacityAPI(t, 10)
}

// testCapacityAPI runs an end-to-end simulation test connecting one server with
// a given number of clients. It sets different priority capacities to all clients
// except a randomly selected one which runs in free client mode. All clients send
// similar requests at the maximum allowed rate and the test verifies whether the
// ratio of processed requests is close enough to the ratio of assigned capacities.
// Running multiple rounds with different settings ensures that changing capacity
// while connected and going back and forth between free and priority mode with
// the supplied API calls is also thoroughly tested.
func testCapacityAPI(t *testing.T, clientCount int) {
	// Skip the test if no data dir is specified.
	if testServerDataDir == "" {
		return
	}
	for !testSim(t, 1, clientCount, []string{testServerDataDir}, nil, func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool {
		if len(servers) != 1 {
			t.Fatalf("Invalid number of servers: %d", len(servers))
		}
		server := servers[0]

		serverRpcClient, err := server.Client()
		if err != nil {
			t.Fatalf("Failed to obtain rpc client: %v", err)
		}
		headNum, headHash := getHead(ctx, t, serverRpcClient)
		minCap, totalCap := getCapacityInfo(ctx, t, serverRpcClient)
		testCap := totalCap * 3 / 4
		t.Logf("Server testCap: %d minCap: %d head number: %d head hash: %064x\n", testCap, minCap, headNum, headHash)
		reqMinCap := uint64(float64(testCap) * minRelCap / (minRelCap + float64(len(clients)-1)))
		if minCap > reqMinCap {
			t.Fatalf("Minimum client capacity (%d) bigger than required minimum for this test (%d)", minCap, reqMinCap)
		}
		freeIdx := rand.Intn(len(clients))

		clientRpcClients := make([]*rpc.Client, len(clients))
		for i, client := range clients {
			var err error
			clientRpcClients[i], err = client.Client()
			if err != nil {
				t.Fatalf("Failed to obtain rpc client: %v", err)
			}
			t.Log("connecting client", i)
			if i != freeIdx {
				setCapacity(ctx, t, serverRpcClient, client.ID(), testCap/uint64(len(clients)))
			}
			net.Connect(client.ID(), server.ID())

			for {
				select {
				case <-ctx.Done():
					t.Fatalf("Timeout")
				default:
				}
				num, hash := getHead(ctx, t, clientRpcClients[i])
				if num == headNum && hash == headHash {
					t.Log("client", i, "synced")
					break
				}
				time.Sleep(time.Millisecond * 200)
			}
		}

		var wg sync.WaitGroup
		stop := make(chan struct{})

		reqCount := make([]uint64, len(clientRpcClients))

		// Send light requests like crazy.
		for i, c := range clientRpcClients {
			wg.Add(1)
			i, c := i, c
			go func() {
				defer wg.Done()

				queue := make(chan struct{}, 100)
				reqCount[i] = 0
				for {
					select {
					case queue <- struct{}{}:
						select {
						case <-stop:
							return
						case <-ctx.Done():
							return
						default:
							wg.Add(1)
							go func() {
								ok := testRequest(ctx, t, c)
								wg.Done()
								<-queue
								if ok {
									count := atomic.AddUint64(&reqCount[i], 1)
									if count%10000 == 0 {
										freezeClient(ctx, t, serverRpcClient, clients[i].ID())
									}
								}
							}()
						}
					case <-stop:
						return
					case <-ctx.Done():
						return
					}
				}
			}()
		}

		processedSince := func(start []uint64) []uint64 {
			res := make([]uint64, len(reqCount))
			for i := range reqCount {
				res[i] = atomic.LoadUint64(&reqCount[i])
				if start != nil {
					res[i] -= start[i]
				}
			}
			return res
		}

		weights := make([]float64, len(clients))
		for c := 0; c < 5; c++ {
			setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), minCap)
			freeIdx = rand.Intn(len(clients))
			var sum float64
			for i := range clients {
				if i == freeIdx {
					weights[i] = 0
				} else {
					weights[i] = rand.Float64()*(1-minRelCap) + minRelCap
				}
				sum += weights[i]
			}
			for i, client := range clients {
				weights[i] *= float64(testCap-minCap-100) / sum
				capacity := uint64(weights[i])
				if i != freeIdx && capacity < getCapacity(ctx, t, serverRpcClient, client.ID()) {
					setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
				}
			}
			setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), 0)
			for i, client := range clients {
				capacity := uint64(weights[i])
				if i != freeIdx && capacity > getCapacity(ctx, t, serverRpcClient, client.ID()) {
					setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
				}
			}
			weights[freeIdx] = float64(minCap)
			for i := range clients {
				weights[i] /= float64(testCap)
			}

			time.Sleep(flowcontrol.DecParamDelay)
			t.Log("Starting measurement")
			t.Logf("Relative weights:")
			for i := range clients {
				t.Logf(" %f", weights[i])
			}
			t.Log()
			start := processedSince(nil)
			for {
				select {
				case <-ctx.Done():
					t.Fatalf("Timeout")
				default:
				}

				_, totalCap = getCapacityInfo(ctx, t, serverRpcClient)
				if totalCap < testCap {
					t.Log("Total capacity underrun")
					close(stop)
					wg.Wait()
					return false
				}

				processed := processedSince(start)
				var avg uint64
				t.Logf("Processed")
				for i, p := range processed {
					t.Logf(" %d", p)
					processed[i] = uint64(float64(p) / weights[i])
					avg += processed[i]
				}
				avg /= uint64(len(processed))

				if avg >= 10000 {
					var maxDev float64
					for _, p := range processed {
						dev := float64(int64(p-avg)) / float64(avg)
						t.Logf(" %7.4f", dev)
						if dev < 0 {
							dev = -dev
						}
						if dev > maxDev {
							maxDev = dev
						}
					}
					t.Logf(" max deviation: %f totalCap: %d\n", maxDev, totalCap)
					if maxDev <= testTolerance {
						t.Log("success")
						break
					}
				} else {
					t.Log()
				}
				time.Sleep(time.Millisecond * 200)
			}
		}

		close(stop)
		wg.Wait()

		for i, count := range reqCount {
			t.Log("client", i, "processed", count)
		}
		return true
	}) {
		t.Log("restarting test")
	}
}

func getHead(ctx context.Context, t *testing.T, client *rpc.Client) (uint64, common.Hash) {
	res := make(map[string]interface{})
	if err := client.CallContext(ctx, &res, "eth_getBlockByNumber", "latest", false); err != nil {
		t.Fatalf("Failed to obtain head block: %v", err)
	}
	numStr, ok := res["number"].(string)
	if !ok {
		t.Fatalf("RPC block number field invalid")
	}
	num, err := hexutil.DecodeUint64(numStr)
	if err != nil {
		t.Fatalf("Failed to decode RPC block number: %v", err)
	}
	hashStr, ok := res["hash"].(string)
	if !ok {
		t.Fatalf("RPC block hash field invalid")
	}
	hash := common.HexToHash(hashStr)
	return num, hash
}

// testRequest sends a single eth_getBalance request for a random address and
// reports whether it succeeded.
func testRequest(ctx context.Context, t *testing.T, client *rpc.Client) bool {
	var res string
	var addr common.Address
	rand.Read(addr[:])
	c, cancel := context.WithTimeout(ctx, time.Second*12)
	defer cancel()
	err := client.CallContext(c, &res, "eth_getBalance", addr, "latest")
	if err != nil {
		t.Log("request error:", err)
	}
	return err == nil
}

// freezeClient temporarily freezes the given client on the server through the
// debug_freezeClient API.
func freezeClient(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) {
	if err := server.CallContext(ctx, nil, "debug_freezeClient", clientID); err != nil {
		t.Fatalf("Failed to freeze client: %v", err)
	}
}

// setCapacity assigns the given flow control capacity to a client through the
// les_setClientParams API.
func setCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID, cap uint64) {
	params := make(map[string]interface{})
	params["capacity"] = cap
	if err := server.CallContext(ctx, nil, "les_setClientParams", []enode.ID{clientID}, []string{}, params); err != nil {
		t.Fatalf("Failed to set client capacity: %v", err)
	}
}

// getCapacity queries the capacity currently assigned to a client through the
// les_clientInfo API.
func getCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) uint64 {
	var res map[enode.ID]map[string]interface{}
	if err := server.CallContext(ctx, &res, "les_clientInfo", []enode.ID{clientID}, []string{}); err != nil {
		t.Fatalf("Failed to get client info: %v", err)
	}
	info, ok := res[clientID]
	if !ok {
		t.Fatalf("Missing client info")
	}
	v, ok := info["capacity"]
	if !ok {
		t.Fatalf("Missing field in client info: capacity")
	}
	vv, ok := v.(float64)
	if !ok {
		t.Fatalf("Failed to decode capacity field")
	}
	return uint64(vv)
}

// getCapacityInfo returns the minimum and total capacity reported by the
// les_serverInfo API.
func getCapacityInfo(ctx context.Context, t *testing.T, server *rpc.Client) (minCap, totalCap uint64) {
	var res map[string]interface{}
	if err := server.CallContext(ctx, &res, "les_serverInfo"); err != nil {
		t.Fatalf("Failed to query server info: %v", err)
	}
	decode := func(s string) uint64 {
		v, ok := res[s]
		if !ok {
			t.Fatalf("Missing field in server info: %s", s)
		}
		vv, ok := v.(float64)
		if !ok {
			t.Fatalf("Failed to decode server info field: %s", s)
		}
		return uint64(vv)
	}
	minCap = decode("minimumCapacity")
	totalCap = decode("totalCapacity")
	return
}

// services maps the lifecycle names used in the simulation to their constructors.
var services = adapters.LifecycleConstructors{
	"lesclient": newLesClientService,
	"lesserver": newLesServerService,
}

// NewNetwork creates a simulation network using the adapter selected by the
// -adapter command line flag.
func NewNetwork() (*simulations.Network, func(), error) {
	adapter, adapterTeardown, err := NewAdapter(*simAdapter, services)
	if err != nil {
		return nil, adapterTeardown, err
	}
	defaultService := "streamer"
	net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
		ID:             "0",
		DefaultService: defaultService,
	})
	teardown := func() {
		adapterTeardown()
		net.Shutdown()
	}
	return net, teardown, nil
}
// NewAdapter creates the requested simulation adapter and returns it together
// with a teardown function.
func NewAdapter(adapterType string, services adapters.LifecycleConstructors) (adapter adapters.NodeAdapter, teardown func(), err error) {
	teardown = func() {}
	switch adapterType {
	case "sim":
		adapter = adapters.NewSimAdapter(services)
	// case "socket":
	// 	adapter = adapters.NewSocketAdapter(services)
	case "exec":
		baseDir, err0 := ioutil.TempDir("", "les-test")
		if err0 != nil {
			return nil, teardown, err0
		}
		teardown = func() { os.RemoveAll(baseDir) }
		adapter = adapters.NewExecAdapter(baseDir)
	/*case "docker":
	adapter, err = adapters.NewDockerAdapter()
	if err != nil {
		return nil, teardown, err
	}*/
	default:
		return nil, teardown, errors.New("adapter needs to be one of sim, socket, exec, docker")
	}
	return adapter, teardown, nil
}

// testSim sets up a simulation network with the given number of server and
// client nodes, starts all of them and runs the supplied test function.
func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []string, test func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool) bool {
	net, teardown, err := NewNetwork()
	defer teardown()
	if err != nil {
		t.Fatalf("Failed to create network: %v", err)
	}
	timeout := 1800 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	servers := make([]*simulations.Node, serverCount)
	clients := make([]*simulations.Node, clientCount)

	for i := range clients {
		clientconf := adapters.RandomNodeConfig()
		clientconf.Lifecycles = []string{"lesclient"}
		if len(clientDir) == clientCount {
			clientconf.DataDir = clientDir[i]
		}
		client, err := net.NewNodeWithConfig(clientconf)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		clients[i] = client
	}

	for i := range servers {
		serverconf := adapters.RandomNodeConfig()
		serverconf.Lifecycles = []string{"lesserver"}
		if len(serverDir) == serverCount {
			serverconf.DataDir = serverDir[i]
		}
		server, err := net.NewNodeWithConfig(serverconf)
		if err != nil {
			t.Fatalf("Failed to create server: %v", err)
		}
		servers[i] = server
	}

	for _, client := range clients {
		if err := net.Start(client.ID()); err != nil {
			t.Fatalf("Failed to start client node: %v", err)
		}
	}
	for _, server := range servers {
		if err := net.Start(server.ID()); err != nil {
			t.Fatalf("Failed to start server node: %v", err)
		}
	}

	return test(ctx, net, servers, clients)
}

// newLesClientService constructs a light client node for the simulation.
func newLesClientService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
	config := ethconfig.Defaults
	config.SyncMode = downloader.LightSync
	config.Ethash.PowMode = ethash.ModeFake
	return New(stack, &config)
}

// newLesServerService constructs a full node with an attached LES server for
// the simulation.
func newLesServerService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
	config := ethconfig.Defaults
	config.SyncMode = downloader.FullSync
	config.LightServ = testServerCapacity
	config.LightPeers = testMaxClients
	ethereum, err := eth.New(stack, &config)
	if err != nil {
		return nil, err
	}
	_, err = NewLesServer(stack, ethereum, &config)
	if err != nil {
		return nil, err
	}
	return ethereum, nil
}
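
// The capacity tests above stay skipped while testServerDataDir is empty. As a
// rough sketch of a manual run (the exact flag values below are illustrative,
// not prescribed by this file), testServerDataDir would be pointed at a
// populated chain database and the test binary invoked with the custom flags
// registered in TestMain, for example:
//
//	go test -run TestCapacityAPI3 ./les -args -adapter exec -loglevel 3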