github.com/wzbox/go-ethereum@v1.9.2/les/api_test.go (about)

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package les
    18  
    19  import (
    20  	"context"
    21  	"errors"
    22  	"flag"
    23  	"io/ioutil"
    24  	"math/rand"
    25  	"os"
    26  	"sync"
    27  	"sync/atomic"
    28  	"testing"
    29  	"time"
    30  
    31  	"github.com/ethereum/go-ethereum/common"
    32  	"github.com/ethereum/go-ethereum/common/hexutil"
    33  	"github.com/ethereum/go-ethereum/consensus/ethash"
    34  	"github.com/ethereum/go-ethereum/eth"
    35  	"github.com/ethereum/go-ethereum/eth/downloader"
    36  	"github.com/ethereum/go-ethereum/les/flowcontrol"
    37  	"github.com/ethereum/go-ethereum/log"
    38  	"github.com/ethereum/go-ethereum/node"
    39  	"github.com/ethereum/go-ethereum/p2p/enode"
    40  	"github.com/ethereum/go-ethereum/p2p/simulations"
    41  	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
    42  	"github.com/ethereum/go-ethereum/rpc"
    43  	"github.com/mattn/go-colorable"
    44  )
    45  
    46  /*
    47  This test is not meant to be a part of the automatic testing process because it
    48  runs for a long time and also requires a large database in order to do a meaningful
    49  request performance test. When testServerDataDir is empty, the test is skipped.
    50  */
    51  
const (
	testServerDataDir  = "" // should always be empty on the master branch
	testServerCapacity = 200 // LightServ setting for the simulated server
	testMaxClients     = 10  // LightPeers setting for the simulated server
	testTolerance      = 0.1 // max allowed relative deviation of processed-request ratios
	minRelCap          = 0.2 // minimum relative capacity assigned to any priority client
)
    59  
// TestCapacityAPI3 runs the capacity API simulation with 3 clients.
func TestCapacityAPI3(t *testing.T) {
	testCapacityAPI(t, 3)
}
    63  
// TestCapacityAPI6 runs the capacity API simulation with 6 clients.
func TestCapacityAPI6(t *testing.T) {
	testCapacityAPI(t, 6)
}
    67  
// TestCapacityAPI10 runs the capacity API simulation with 10 clients.
func TestCapacityAPI10(t *testing.T) {
	testCapacityAPI(t, 10)
}
    71  
// testCapacityAPI runs an end-to-end simulation test connecting one server with
// a given number of clients. It sets different priority capacities to all clients
// except a randomly selected one which runs in free client mode. All clients send
// similar requests at the maximum allowed rate and the test verifies whether the
// ratio of processed requests is close enough to the ratio of assigned capacities.
// Running multiple rounds with different settings ensures that changing capacity
// while connected and going back and forth between free and priority mode with
// the supplied API calls is also thoroughly tested.
func testCapacityAPI(t *testing.T, clientCount int) {
	if testServerDataDir == "" {
		// Skip test if no data dir specified
		return
	}

	// The callback returns false on a total-capacity underrun, in which case
	// the whole simulation is torn down and restarted from scratch.
	for !testSim(t, 1, clientCount, []string{testServerDataDir}, nil, func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool {
		if len(servers) != 1 {
			t.Fatalf("Invalid number of servers: %d", len(servers))
		}
		server := servers[0]

		clientRpcClients := make([]*rpc.Client, len(clients))

		serverRpcClient, err := server.Client()
		if err != nil {
			t.Fatalf("Failed to obtain rpc client: %v", err)
		}
		headNum, headHash := getHead(ctx, t, serverRpcClient)
		minCap, freeCap, totalCap := getCapacityInfo(ctx, t, serverRpcClient)
		// Only use 3/4 of the server's total capacity so there is headroom.
		testCap := totalCap * 3 / 4
		t.Logf("Server testCap: %d  minCap: %d  head number: %d  head hash: %064x\n", testCap, minCap, headNum, headHash)
		// Smallest capacity any priority client may be assigned in this test;
		// the server's own minimum must not exceed it or rounds cannot run.
		reqMinCap := uint64(float64(testCap) * minRelCap / (minRelCap + float64(len(clients)-1)))
		if minCap > reqMinCap {
			t.Fatalf("Minimum client capacity (%d) bigger than required minimum for this test (%d)", minCap, reqMinCap)
		}
		// One randomly chosen client runs in free (non-priority) mode.
		freeIdx := rand.Intn(len(clients))

		for i, client := range clients {
			var err error
			clientRpcClients[i], err = client.Client()
			if err != nil {
				t.Fatalf("Failed to obtain rpc client: %v", err)
			}

			t.Log("connecting client", i)
			if i != freeIdx {
				setCapacity(ctx, t, serverRpcClient, client.ID(), testCap/uint64(len(clients)))
			}
			net.Connect(client.ID(), server.ID())

			// Poll until the client has synced up to the server's head.
			for {
				select {
				case <-ctx.Done():
					t.Fatalf("Timeout")
				default:
				}
				num, hash := getHead(ctx, t, clientRpcClients[i])
				if num == headNum && hash == headHash {
					t.Log("client", i, "synced")
					break
				}
				time.Sleep(time.Millisecond * 200)
			}
		}

		var wg sync.WaitGroup
		stop := make(chan struct{})

		// reqCount[i] counts requests successfully served to client i; written
		// via atomic ops by the request goroutines spawned below.
		reqCount := make([]uint64, len(clientRpcClients))

		// Start one request generator per client, each keeping up to 100
		// requests in flight at a time (bounded by the queue channel).
		for i, c := range clientRpcClients {
			wg.Add(1)
			i, c := i, c // capture loop variables for the goroutine (pre-Go 1.22 semantics)
			go func() {
				queue := make(chan struct{}, 100)
				reqCount[i] = 0
				for {
					select {
					case queue <- struct{}{}:
						select {
						case <-stop:
							wg.Done()
							return
						case <-ctx.Done():
							wg.Done()
							return
						default:
							wg.Add(1)
							go func() {
								ok := testRequest(ctx, t, c)
								wg.Done()
								<-queue
								if ok {
									count := atomic.AddUint64(&reqCount[i], 1)
									// Periodically freeze the client to also
									// exercise the freeze/recovery mechanism.
									if count%10000 == 0 {
										freezeClient(ctx, t, serverRpcClient, clients[i].ID())
									}
								}
							}()
						}
					case <-stop:
						wg.Done()
						return
					case <-ctx.Done():
						wg.Done()
						return
					}
				}
			}()
		}

		// processedSince returns the per-client served-request counts,
		// optionally relative to a previously taken snapshot.
		processedSince := func(start []uint64) []uint64 {
			res := make([]uint64, len(reqCount))
			for i := range reqCount {
				res[i] = atomic.LoadUint64(&reqCount[i])
				if start != nil {
					res[i] -= start[i]
				}
			}
			return res
		}

		weights := make([]float64, len(clients))
		// Run five measurement rounds, re-randomizing the free client and the
		// capacity distribution each round.
		for c := 0; c < 5; c++ {
			// Give the previous free client a real capacity back before
			// selecting a new free client.
			setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), freeCap)
			freeIdx = rand.Intn(len(clients))
			var sum float64
			for i := range clients {
				if i == freeIdx {
					weights[i] = 0
				} else {
					weights[i] = rand.Float64()*(1-minRelCap) + minRelCap
				}
				sum += weights[i]
			}
			// Apply the new capacities in two passes (decreases first, then
			// increases) so the total assigned capacity never overshoots.
			for i, client := range clients {
				weights[i] *= float64(testCap-freeCap-100) / sum
				capacity := uint64(weights[i])
				if i != freeIdx && capacity < getCapacity(ctx, t, serverRpcClient, client.ID()) {
					setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
				}
			}
			setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), 0)
			for i, client := range clients {
				capacity := uint64(weights[i])
				if i != freeIdx && capacity > getCapacity(ctx, t, serverRpcClient, client.ID()) {
					setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
				}
			}
			// Normalize weights to relative shares of testCap for comparison
			// against measured throughput.
			weights[freeIdx] = float64(freeCap)
			for i := range clients {
				weights[i] /= float64(testCap)
			}

			// Let the flow control parameter changes propagate before measuring.
			time.Sleep(flowcontrol.DecParamDelay)
			t.Log("Starting measurement")
			t.Logf("Relative weights:")
			for i := range clients {
				t.Logf("  %f", weights[i])
			}
			t.Log()
			start := processedSince(nil)
			for {
				select {
				case <-ctx.Done():
					t.Fatalf("Timeout")
				default:
				}

				_, _, totalCap = getCapacityInfo(ctx, t, serverRpcClient)
				if totalCap < testCap {
					t.Log("Total capacity underrun")
					close(stop)
					wg.Wait()
					// Returning false makes the caller restart the simulation.
					return false
				}

				processed := processedSince(start)
				var avg uint64
				t.Logf("Processed")
				for i, p := range processed {
					t.Logf(" %d", p)
					// Scale by the client's weight; all entries should then be
					// roughly equal if the capacity ratios are being honored.
					processed[i] = uint64(float64(p) / weights[i])
					avg += processed[i]
				}
				avg /= uint64(len(processed))

				// Only evaluate deviations after enough samples accumulated.
				if avg >= 10000 {
					var maxDev float64
					for _, p := range processed {
						dev := float64(int64(p-avg)) / float64(avg)
						t.Logf(" %7.4f", dev)
						if dev < 0 {
							dev = -dev
						}
						if dev > maxDev {
							maxDev = dev
						}
					}
					t.Logf("  max deviation: %f  totalCap: %d\n", maxDev, totalCap)
					if maxDev <= testTolerance {
						t.Log("success")
						break
					}
				} else {
					t.Log()
				}
				time.Sleep(time.Millisecond * 200)
			}
		}

		close(stop)
		wg.Wait()

		for i, count := range reqCount {
			t.Log("client", i, "processed", count)
		}
		return true
	}) {
		t.Log("restarting test")
	}
}
   293  
   294  func getHead(ctx context.Context, t *testing.T, client *rpc.Client) (uint64, common.Hash) {
   295  	res := make(map[string]interface{})
   296  	if err := client.CallContext(ctx, &res, "eth_getBlockByNumber", "latest", false); err != nil {
   297  		t.Fatalf("Failed to obtain head block: %v", err)
   298  	}
   299  	numStr, ok := res["number"].(string)
   300  	if !ok {
   301  		t.Fatalf("RPC block number field invalid")
   302  	}
   303  	num, err := hexutil.DecodeUint64(numStr)
   304  	if err != nil {
   305  		t.Fatalf("Failed to decode RPC block number: %v", err)
   306  	}
   307  	hashStr, ok := res["hash"].(string)
   308  	if !ok {
   309  		t.Fatalf("RPC block number field invalid")
   310  	}
   311  	hash := common.HexToHash(hashStr)
   312  	return num, hash
   313  }
   314  
   315  func testRequest(ctx context.Context, t *testing.T, client *rpc.Client) bool {
   316  	//res := make(map[string]interface{})
   317  	var res string
   318  	var addr common.Address
   319  	rand.Read(addr[:])
   320  	c, _ := context.WithTimeout(ctx, time.Second*12)
   321  	//	if err := client.CallContext(ctx, &res, "eth_getProof", addr, nil, "latest"); err != nil {
   322  	err := client.CallContext(c, &res, "eth_getBalance", addr, "latest")
   323  	if err != nil {
   324  		t.Log("request error:", err)
   325  	}
   326  	return err == nil
   327  }
   328  
   329  func freezeClient(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) {
   330  	if err := server.CallContext(ctx, nil, "debug_freezeClient", clientID); err != nil {
   331  		t.Fatalf("Failed to freeze client: %v", err)
   332  	}
   333  
   334  }
   335  
   336  func setCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID, cap uint64) {
   337  	params := make(map[string]interface{})
   338  	params["capacity"] = cap
   339  	if err := server.CallContext(ctx, nil, "les_setClientParams", []enode.ID{clientID}, []string{}, params); err != nil {
   340  		t.Fatalf("Failed to set client capacity: %v", err)
   341  	}
   342  }
   343  
   344  func getCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) uint64 {
   345  	var res map[enode.ID]map[string]interface{}
   346  	if err := server.CallContext(ctx, &res, "les_clientInfo", []enode.ID{clientID}, []string{}); err != nil {
   347  		t.Fatalf("Failed to get client info: %v", err)
   348  	}
   349  	info, ok := res[clientID]
   350  	if !ok {
   351  		t.Fatalf("Missing client info")
   352  	}
   353  	v, ok := info["capacity"]
   354  	if !ok {
   355  		t.Fatalf("Missing field in client info: capacity")
   356  	}
   357  	vv, ok := v.(float64)
   358  	if !ok {
   359  		t.Fatalf("Failed to decode capacity field")
   360  	}
   361  	return uint64(vv)
   362  }
   363  
   364  func getCapacityInfo(ctx context.Context, t *testing.T, server *rpc.Client) (minCap, freeCap, totalCap uint64) {
   365  	var res map[string]interface{}
   366  	if err := server.CallContext(ctx, &res, "les_serverInfo"); err != nil {
   367  		t.Fatalf("Failed to query server info: %v", err)
   368  	}
   369  	decode := func(s string) uint64 {
   370  		v, ok := res[s]
   371  		if !ok {
   372  			t.Fatalf("Missing field in server info: %s", s)
   373  		}
   374  		vv, ok := v.(float64)
   375  		if !ok {
   376  			t.Fatalf("Failed to decode server info field: %s", s)
   377  		}
   378  		return uint64(vv)
   379  	}
   380  	minCap = decode("minimumCapacity")
   381  	freeCap = decode("freeClientCapacity")
   382  	totalCap = decode("totalCapacity")
   383  	return
   384  }
   385  
// init parses the test flags, registers the simulation services and sets up
// logging. flag.Parse must run before the flag values below are dereferenced.
func init() {
	flag.Parse()
	// register the Delivery service which will run as a devp2p
	// protocol when using the exec adapter
	adapters.RegisterServices(services)

	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}
   395  
// Command line flags controlling the simulation environment.
var (
	adapter  = flag.String("adapter", "exec", "type of simulation: sim|socket|exec|docker")
	loglevel = flag.Int("loglevel", 0, "verbosity of logs")
	nodes    = flag.Int("nodes", 0, "number of nodes")
)
   401  
// services maps simulation service names to their constructors; it is
// registered with the adapters in init so every adapter type can start them.
var services = adapters.Services{
	"lesclient": newLesClientService,
	"lesserver": newLesServerService,
}
   406  
   407  func NewNetwork() (*simulations.Network, func(), error) {
   408  	adapter, adapterTeardown, err := NewAdapter(*adapter, services)
   409  	if err != nil {
   410  		return nil, adapterTeardown, err
   411  	}
   412  	defaultService := "streamer"
   413  	net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
   414  		ID:             "0",
   415  		DefaultService: defaultService,
   416  	})
   417  	teardown := func() {
   418  		adapterTeardown()
   419  		net.Shutdown()
   420  	}
   421  
   422  	return net, teardown, nil
   423  }
   424  
   425  func NewAdapter(adapterType string, services adapters.Services) (adapter adapters.NodeAdapter, teardown func(), err error) {
   426  	teardown = func() {}
   427  	switch adapterType {
   428  	case "sim":
   429  		adapter = adapters.NewSimAdapter(services)
   430  		//	case "socket":
   431  		//		adapter = adapters.NewSocketAdapter(services)
   432  	case "exec":
   433  		baseDir, err0 := ioutil.TempDir("", "les-test")
   434  		if err0 != nil {
   435  			return nil, teardown, err0
   436  		}
   437  		teardown = func() { os.RemoveAll(baseDir) }
   438  		adapter = adapters.NewExecAdapter(baseDir)
   439  	/*case "docker":
   440  	adapter, err = adapters.NewDockerAdapter()
   441  	if err != nil {
   442  		return nil, teardown, err
   443  	}*/
   444  	default:
   445  		return nil, teardown, errors.New("adapter needs to be one of sim, socket, exec, docker")
   446  	}
   447  	return adapter, teardown, nil
   448  }
   449  
// testSim builds a simulation network with the given number of LES server and
// client nodes, optionally assigning pre-existing data directories to them,
// starts all nodes and finally invokes the supplied test callback under a
// long timeout context. The callback's boolean result is returned (false
// conventionally means "restart the whole simulation").
func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []string, test func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool) bool {
	net, teardown, err := NewNetwork()
	defer teardown()
	if err != nil {
		t.Fatalf("Failed to create network: %v", err)
	}
	// Generous timeout: the capacity measurement runs for a long time.
	timeout := 1800 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	servers := make([]*simulations.Node, serverCount)
	clients := make([]*simulations.Node, clientCount)

	for i := range clients {
		clientconf := adapters.RandomNodeConfig()
		clientconf.Services = []string{"lesclient"}
		// Data dirs are only applied when exactly one is supplied per node.
		if len(clientDir) == clientCount {
			clientconf.DataDir = clientDir[i]
		}
		client, err := net.NewNodeWithConfig(clientconf)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		clients[i] = client
	}

	for i := range servers {
		serverconf := adapters.RandomNodeConfig()
		serverconf.Services = []string{"lesserver"}
		if len(serverDir) == serverCount {
			serverconf.DataDir = serverDir[i]
		}
		server, err := net.NewNodeWithConfig(serverconf)
		if err != nil {
			t.Fatalf("Failed to create server: %v", err)
		}
		servers[i] = server
	}

	for _, client := range clients {
		if err := net.Start(client.ID()); err != nil {
			t.Fatalf("Failed to start client node: %v", err)
		}
	}
	for _, server := range servers {
		if err := net.Start(server.ID()); err != nil {
			t.Fatalf("Failed to start server node: %v", err)
		}
	}

	return test(ctx, net, servers, clients)
}
   502  
   503  func newLesClientService(ctx *adapters.ServiceContext) (node.Service, error) {
   504  	config := eth.DefaultConfig
   505  	config.SyncMode = downloader.LightSync
   506  	config.Ethash.PowMode = ethash.ModeFake
   507  	return New(ctx.NodeContext, &config)
   508  }
   509  
   510  func newLesServerService(ctx *adapters.ServiceContext) (node.Service, error) {
   511  	config := eth.DefaultConfig
   512  	config.SyncMode = downloader.FullSync
   513  	config.LightServ = testServerCapacity
   514  	config.LightPeers = testMaxClients
   515  	ethereum, err := eth.New(ctx.NodeContext, &config)
   516  	if err != nil {
   517  		return nil, err
   518  	}
   519  
   520  	server, err := NewLesServer(ethereum, &config)
   521  	if err != nil {
   522  		return nil, err
   523  	}
   524  	ethereum.AddLesServer(server)
   525  	return ethereum, nil
   526  }