github.com/hardtosaygoodbye/go-ethereum@v1.10.16-0.20220122011429-97003b9e6c15/les/api_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"context"
	"errors"
	"flag"
	"io/ioutil"
	"math/rand"
	"os"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/hardtosaygoodbye/go-ethereum/common"
	"github.com/hardtosaygoodbye/go-ethereum/common/hexutil"
	"github.com/hardtosaygoodbye/go-ethereum/consensus/ethash"
	"github.com/hardtosaygoodbye/go-ethereum/eth"
	ethdownloader "github.com/hardtosaygoodbye/go-ethereum/eth/downloader"
	"github.com/hardtosaygoodbye/go-ethereum/eth/ethconfig"
	"github.com/hardtosaygoodbye/go-ethereum/les/downloader"
	"github.com/hardtosaygoodbye/go-ethereum/les/flowcontrol"
	"github.com/hardtosaygoodbye/go-ethereum/log"
	"github.com/hardtosaygoodbye/go-ethereum/node"
	"github.com/hardtosaygoodbye/go-ethereum/p2p/enode"
	"github.com/hardtosaygoodbye/go-ethereum/p2p/simulations"
	"github.com/hardtosaygoodbye/go-ethereum/p2p/simulations/adapters"
	"github.com/hardtosaygoodbye/go-ethereum/rpc"
	"github.com/mattn/go-colorable"
)

// Additional command line flags for the test binary.
var (
	loglevel   = flag.Int("loglevel", 0, "verbosity of logs")
	simAdapter = flag.String("adapter", "exec", "type of simulation: sim|socket|exec|docker")
)
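
// A typical local invocation of these tests might look like this (illustrative
// only; the adapter type and log level are just example values):
//
//	go test -run TestCapacityAPI -adapter exec -loglevel 3 ./les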

func TestMain(m *testing.M) {
	flag.Parse()
	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
	// register the LES client and server services which will run as devp2p
	// protocols when using the exec adapter
	adapters.RegisterLifecycles(services)
	os.Exit(m.Run())
}

// This test is not meant to be a part of the automatic testing process because it
// runs for a long time and also requires a large database in order to do a meaningful
// request performance test. When testServerDataDir is empty, the test is skipped.

const (
	testServerDataDir  = "" // should always be empty on the master branch
	testServerCapacity = 200
	testMaxClients     = 10
	testTolerance      = 0.1
	minRelCap          = 0.2
)
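
// For a local performance run, testServerDataDir would point at the chaindata
// directory of a previously synced LES server, e.g. (hypothetical path):
//
//	testServerDataDir = "/tmp/les-server-datadir"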

func TestCapacityAPI3(t *testing.T) {
	testCapacityAPI(t, 3)
}

func TestCapacityAPI6(t *testing.T) {
	testCapacityAPI(t, 6)
}

func TestCapacityAPI10(t *testing.T) {
	testCapacityAPI(t, 10)
}

// testCapacityAPI runs an end-to-end simulation test connecting one server with
// a given number of clients. It assigns different priority capacities to all
// clients except a randomly selected one, which runs in free client mode. All
// clients send similar requests at the maximum allowed rate and the test verifies
// whether the ratio of processed requests is close enough to the ratio of
// assigned capacities. Running multiple rounds with different settings ensures
// that changing capacity while connected and going back and forth between free
// and priority mode with the supplied API calls is also thoroughly tested.
func testCapacityAPI(t *testing.T, clientCount int) {
	// Skip test if no data dir specified
	if testServerDataDir == "" {
		return
	}
	for !testSim(t, 1, clientCount, []string{testServerDataDir}, nil, func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool {
		if len(servers) != 1 {
			t.Fatalf("Invalid number of servers: %d", len(servers))
		}
		server := servers[0]

		serverRpcClient, err := server.Client()
		if err != nil {
			t.Fatalf("Failed to obtain rpc client: %v", err)
		}
		headNum, headHash := getHead(ctx, t, serverRpcClient)
		minCap, totalCap := getCapacityInfo(ctx, t, serverRpcClient)
		testCap := totalCap * 3 / 4
		t.Logf("Server testCap: %d  minCap: %d  head number: %d  head hash: %064x\n", testCap, minCap, headNum, headHash)
		reqMinCap := uint64(float64(testCap) * minRelCap / (minRelCap + float64(len(clients)-1)))
		if minCap > reqMinCap {
			t.Fatalf("Minimum client capacity (%d) bigger than required minimum for this test (%d)", minCap, reqMinCap)
		}
		freeIdx := rand.Intn(len(clients))

		clientRpcClients := make([]*rpc.Client, len(clients))
		for i, client := range clients {
			var err error
			clientRpcClients[i], err = client.Client()
			if err != nil {
				t.Fatalf("Failed to obtain rpc client: %v", err)
			}
			t.Log("connecting client", i)
			if i != freeIdx {
				setCapacity(ctx, t, serverRpcClient, client.ID(), testCap/uint64(len(clients)))
			}
			net.Connect(client.ID(), server.ID())

			for {
				select {
				case <-ctx.Done():
					t.Fatalf("Timeout")
				default:
				}
				num, hash := getHead(ctx, t, clientRpcClients[i])
				if num == headNum && hash == headHash {
					t.Log("client", i, "synced")
					break
				}
				time.Sleep(time.Millisecond * 200)
			}
		}

		var wg sync.WaitGroup
		stop := make(chan struct{})

		reqCount := make([]uint64, len(clientRpcClients))

		// Send light requests like crazy.
		for i, c := range clientRpcClients {
			wg.Add(1)
			i, c := i, c
			go func() {
				defer wg.Done()

				queue := make(chan struct{}, 100)
				reqCount[i] = 0
				for {
					select {
					case queue <- struct{}{}:
						select {
						case <-stop:
							return
						case <-ctx.Done():
							return
						default:
							wg.Add(1)
							go func() {
								ok := testRequest(ctx, t, c)
								wg.Done()
								<-queue
								if ok {
									count := atomic.AddUint64(&reqCount[i], 1)
									if count%10000 == 0 {
										freezeClient(ctx, t, serverRpcClient, clients[i].ID())
									}
								}
							}()
						}
					case <-stop:
						return
					case <-ctx.Done():
						return
					}
				}
			}()
		}

		processedSince := func(start []uint64) []uint64 {
			res := make([]uint64, len(reqCount))
			for i := range reqCount {
				res[i] = atomic.LoadUint64(&reqCount[i])
				if start != nil {
					res[i] -= start[i]
				}
			}
			return res
		}

		weights := make([]float64, len(clients))
		for c := 0; c < 5; c++ {
			setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), minCap)
			freeIdx = rand.Intn(len(clients))
			var sum float64
			for i := range clients {
				if i == freeIdx {
					weights[i] = 0
				} else {
					weights[i] = rand.Float64()*(1-minRelCap) + minRelCap
				}
				sum += weights[i]
			}
			for i, client := range clients {
				weights[i] *= float64(testCap-minCap-100) / sum
				capacity := uint64(weights[i])
				if i != freeIdx && capacity < getCapacity(ctx, t, serverRpcClient, client.ID()) {
					setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
				}
			}
			setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), 0)
			for i, client := range clients {
				capacity := uint64(weights[i])
				if i != freeIdx && capacity > getCapacity(ctx, t, serverRpcClient, client.ID()) {
					setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
				}
			}
			weights[freeIdx] = float64(minCap)
			for i := range clients {
				weights[i] /= float64(testCap)
			}

			time.Sleep(flowcontrol.DecParamDelay)
			t.Log("Starting measurement")
			t.Logf("Relative weights:")
			for i := range clients {
				t.Logf("  %f", weights[i])
			}
			t.Log()
			start := processedSince(nil)
			for {
				select {
				case <-ctx.Done():
					t.Fatalf("Timeout")
				default:
				}

				_, totalCap = getCapacityInfo(ctx, t, serverRpcClient)
				if totalCap < testCap {
					t.Log("Total capacity underrun")
					close(stop)
					wg.Wait()
					return false
				}

				processed := processedSince(start)
				var avg uint64
				t.Logf("Processed")
				for i, p := range processed {
					t.Logf(" %d", p)
					processed[i] = uint64(float64(p) / weights[i])
					avg += processed[i]
				}
				avg /= uint64(len(processed))

				if avg >= 10000 {
					var maxDev float64
					for _, p := range processed {
						dev := float64(int64(p-avg)) / float64(avg)
						t.Logf(" %7.4f", dev)
						if dev < 0 {
							dev = -dev
						}
						if dev > maxDev {
							maxDev = dev
						}
					}
					t.Logf("  max deviation: %f  totalCap: %d\n", maxDev, totalCap)
					if maxDev <= testTolerance {
						t.Log("success")
						break
					}
				} else {
					t.Log()
				}
				time.Sleep(time.Millisecond * 200)
			}
		}

		close(stop)
		wg.Wait()

		for i, count := range reqCount {
			t.Log("client", i, "processed", count)
		}
		return true
	}) {
		t.Log("restarting test")
	}
}
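
// The success criterion above can be stated in isolation: each client's
// processed-request count is normalized by its relative capacity weight and
// the normalized values are compared against their mean. A minimal sketch of
// that computation (a hypothetical helper, not called by the test itself):
func maxRelativeDeviation(processed []uint64, weights []float64) float64 {
	normalized := make([]float64, len(processed))
	var avg float64
	for i, p := range processed {
		// scale the raw count by the client's assigned capacity share
		normalized[i] = float64(p) / weights[i]
		avg += normalized[i]
	}
	avg /= float64(len(normalized))
	var maxDev float64
	for _, n := range normalized {
		dev := (n - avg) / avg
		if dev < 0 {
			dev = -dev
		}
		if dev > maxDev {
			maxDev = dev
		}
	}
	// a measurement round succeeds when this stays within testTolerance
	return maxDev
}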

func getHead(ctx context.Context, t *testing.T, client *rpc.Client) (uint64, common.Hash) {
	res := make(map[string]interface{})
	if err := client.CallContext(ctx, &res, "eth_getBlockByNumber", "latest", false); err != nil {
		t.Fatalf("Failed to obtain head block: %v", err)
	}
	numStr, ok := res["number"].(string)
	if !ok {
		t.Fatalf("RPC block number field invalid")
	}
	num, err := hexutil.DecodeUint64(numStr)
	if err != nil {
		t.Fatalf("Failed to decode RPC block number: %v", err)
	}
	hashStr, ok := res["hash"].(string)
	if !ok {
		t.Fatalf("RPC block hash field invalid")
	}
	hash := common.HexToHash(hashStr)
	return num, hash
}
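
// getHead only needs the "number" and "hash" fields of the
// eth_getBlockByNumber response, which (values illustrative) looks like:
//
//	{"number": "0x10", "hash": "0x3a1f...", ...}
//
// Quantities are hex-encoded per the Ethereum JSON-RPC conventions, hence the
// hexutil.DecodeUint64 call above.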

func testRequest(ctx context.Context, t *testing.T, client *rpc.Client) bool {
	var res string
	var addr common.Address
	rand.Read(addr[:])
	c, cancel := context.WithTimeout(ctx, time.Second*12)
	defer cancel()
	err := client.CallContext(c, &res, "eth_getBalance", addr, "latest")
	if err != nil {
		t.Log("request error:", err)
	}
	return err == nil
}

func freezeClient(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) {
	if err := server.CallContext(ctx, nil, "debug_freezeClient", clientID); err != nil {
		t.Fatalf("Failed to freeze client: %v", err)
	}
}

func setCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID, cap uint64) {
	params := make(map[string]interface{})
	params["capacity"] = cap
	if err := server.CallContext(ctx, nil, "les_setClientParams", []enode.ID{clientID}, []string{}, params); err != nil {
		t.Fatalf("Failed to set client capacity: %v", err)
	}
}

func getCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) uint64 {
	var res map[enode.ID]map[string]interface{}
	if err := server.CallContext(ctx, &res, "les_clientInfo", []enode.ID{clientID}, []string{}); err != nil {
		t.Fatalf("Failed to get client info: %v", err)
	}
	info, ok := res[clientID]
	if !ok {
		t.Fatalf("Missing client info")
	}
	v, ok := info["capacity"]
	if !ok {
		t.Fatalf("Missing field in client info: capacity")
	}
	vv, ok := v.(float64)
	if !ok {
		t.Fatalf("Failed to decode capacity field")
	}
	return uint64(vv)
}

func getCapacityInfo(ctx context.Context, t *testing.T, server *rpc.Client) (minCap, totalCap uint64) {
	var res map[string]interface{}
	if err := server.CallContext(ctx, &res, "les_serverInfo"); err != nil {
		t.Fatalf("Failed to query server info: %v", err)
	}
	decode := func(s string) uint64 {
		v, ok := res[s]
		if !ok {
			t.Fatalf("Missing field in server info: %s", s)
		}
		vv, ok := v.(float64)
		if !ok {
			t.Fatalf("Failed to decode server info field: %s", s)
		}
		return uint64(vv)
	}
	minCap = decode("minimumCapacity")
	totalCap = decode("totalCapacity")
	return
}
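
// Note on decoding: with an interface{} target, encoding/json unmarshals every
// JSON number as a float64, which is why getCapacity and getCapacityInfo
// assert v.(float64) before converting to uint64. A les_serverInfo response is
// a JSON object along these lines (field values are made up):
//
//	{"minimumCapacity": 16000, "totalCapacity": 200000}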

var services = adapters.LifecycleConstructors{
	"lesclient": newLesClientService,
	"lesserver": newLesServerService,
}

func NewNetwork() (*simulations.Network, func(), error) {
	adapter, adapterTeardown, err := NewAdapter(*simAdapter, services)
	if err != nil {
		return nil, adapterTeardown, err
	}
	defaultService := "streamer"
	net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
		ID:             "0",
		DefaultService: defaultService,
	})
	teardown := func() {
		adapterTeardown()
		net.Shutdown()
	}
	return net, teardown, nil
}

func NewAdapter(adapterType string, services adapters.LifecycleConstructors) (adapter adapters.NodeAdapter, teardown func(), err error) {
	teardown = func() {}
	switch adapterType {
	case "sim":
		adapter = adapters.NewSimAdapter(services)
		//	case "socket":
		//		adapter = adapters.NewSocketAdapter(services)
	case "exec":
		baseDir, err0 := ioutil.TempDir("", "les-test")
		if err0 != nil {
			return nil, teardown, err0
		}
		teardown = func() { os.RemoveAll(baseDir) }
		adapter = adapters.NewExecAdapter(baseDir)
	/*case "docker":
	adapter, err = adapters.NewDockerAdapter()
	if err != nil {
		return nil, teardown, err
	}*/
	default:
		return nil, teardown, errors.New("adapter needs to be one of sim, socket, exec, docker")
	}
	return adapter, teardown, nil
}

func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []string, test func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool) bool {
	net, teardown, err := NewNetwork()
	defer teardown()
	if err != nil {
		t.Fatalf("Failed to create network: %v", err)
	}
	timeout := 1800 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	servers := make([]*simulations.Node, serverCount)
	clients := make([]*simulations.Node, clientCount)

	for i := range clients {
		clientconf := adapters.RandomNodeConfig()
		clientconf.Lifecycles = []string{"lesclient"}
		if len(clientDir) == clientCount {
			clientconf.DataDir = clientDir[i]
		}
		client, err := net.NewNodeWithConfig(clientconf)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		clients[i] = client
	}

	for i := range servers {
		serverconf := adapters.RandomNodeConfig()
		serverconf.Lifecycles = []string{"lesserver"}
		if len(serverDir) == serverCount {
			serverconf.DataDir = serverDir[i]
		}
		server, err := net.NewNodeWithConfig(serverconf)
		if err != nil {
			t.Fatalf("Failed to create server: %v", err)
		}
		servers[i] = server
	}

	for _, client := range clients {
		if err := net.Start(client.ID()); err != nil {
			t.Fatalf("Failed to start client node: %v", err)
		}
	}
	for _, server := range servers {
		if err := net.Start(server.ID()); err != nil {
			t.Fatalf("Failed to start server node: %v", err)
		}
	}

	return test(ctx, net, servers, clients)
}

func newLesClientService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
	config := ethconfig.Defaults
	config.SyncMode = (ethdownloader.SyncMode)(downloader.LightSync)
	config.Ethash.PowMode = ethash.ModeFake
	return New(stack, &config)
}

func newLesServerService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
	config := ethconfig.Defaults
	config.SyncMode = (ethdownloader.SyncMode)(downloader.FullSync)
	config.LightServ = testServerCapacity
	config.LightPeers = testMaxClients
	ethereum, err := eth.New(stack, &config)
	if err != nil {
		return nil, err
	}
	_, err = NewLesServer(stack, ethereum, &config)
	if err != nil {
		return nil, err
	}
	return ethereum, nil
}