github.com/mprishchepo/go-ethereum@v1.9.7-0.20191031044858-21506be82b68/les/api_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"context"
	"errors"
	"flag"
	"io/ioutil"
	"math/rand"
	"os"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/Fantom-foundation/go-ethereum/common"
	"github.com/Fantom-foundation/go-ethereum/common/hexutil"
	"github.com/Fantom-foundation/go-ethereum/consensus/ethash"
	"github.com/Fantom-foundation/go-ethereum/eth"
	"github.com/Fantom-foundation/go-ethereum/eth/downloader"
	"github.com/Fantom-foundation/go-ethereum/les/flowcontrol"
	"github.com/Fantom-foundation/go-ethereum/log"
	"github.com/Fantom-foundation/go-ethereum/node"
	"github.com/Fantom-foundation/go-ethereum/p2p/enode"
	"github.com/Fantom-foundation/go-ethereum/p2p/simulations"
	"github.com/Fantom-foundation/go-ethereum/p2p/simulations/adapters"
	"github.com/Fantom-foundation/go-ethereum/rpc"
	"github.com/mattn/go-colorable"
)

// Additional command line flags for the test binary.
var (
	loglevel   = flag.Int("loglevel", 0, "verbosity of logs")
	simAdapter = flag.String("adapter", "exec", "type of simulation: sim|socket|exec|docker")
)

func TestMain(m *testing.M) {
	flag.Parse()
	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
	// Register the LES client and server services so that they can run as
	// devp2p protocols when using the exec adapter.
	adapters.RegisterServices(services)
	os.Exit(m.Run())
}

// This test is not meant to be a part of the automatic testing process because it
// runs for a long time and also requires a large database in order to do a meaningful
// request performance test. When testServerDataDir is empty, the test is skipped.

const (
	testServerDataDir  = "" // should always be empty on the master branch
	testServerCapacity = 200
	testMaxClients     = 10
	testTolerance      = 0.1
	minRelCap          = 0.2
)

func TestCapacityAPI3(t *testing.T) {
	testCapacityAPI(t, 3)
}

func TestCapacityAPI6(t *testing.T) {
	testCapacityAPI(t, 6)
}

func TestCapacityAPI10(t *testing.T) {
	testCapacityAPI(t, 10)
}

// testCapacityAPI runs an end-to-end simulation test connecting one server with
// a given number of clients. It sets different priority capacities to all clients
// except a randomly selected one which runs in free client mode. All clients send
// similar requests at the maximum allowed rate and the test verifies whether the
// ratio of processed requests is close enough to the ratio of assigned capacities.
// Running multiple rounds with different settings ensures that changing capacity
// while connected and going back and forth between free and priority mode with
// the supplied API calls is also thoroughly tested.
func testCapacityAPI(t *testing.T, clientCount int) {
	// Skip test if no data dir specified
	if testServerDataDir == "" {
		return
	}
	for !testSim(t, 1, clientCount, []string{testServerDataDir}, nil, func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool {
		if len(servers) != 1 {
			t.Fatalf("Invalid number of servers: %d", len(servers))
		}
		server := servers[0]

		serverRpcClient, err := server.Client()
		if err != nil {
			t.Fatalf("Failed to obtain rpc client: %v", err)
		}
		headNum, headHash := getHead(ctx, t, serverRpcClient)
		minCap, freeCap, totalCap := getCapacityInfo(ctx, t, serverRpcClient)
		testCap := totalCap * 3 / 4
		t.Logf("Server testCap: %d  minCap: %d  head number: %d  head hash: %064x\n", testCap, minCap, headNum, headHash)
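		// Conservative lower bound on the smallest capacity the random
		// weighting below can assign to a priority client: its weight is at
		// least minRelCap while the combined weight of all clients stays below
		// minRelCap + len(clients)-1. The server's advertised minimum client
		// capacity must fit under this bound for the test to work.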
		reqMinCap := uint64(float64(testCap) * minRelCap / (minRelCap + float64(len(clients)-1)))
		if minCap > reqMinCap {
			t.Fatalf("Minimum client capacity (%d) is higher than the smallest capacity this test may assign (%d)", minCap, reqMinCap)
		}
		freeIdx := rand.Intn(len(clients))

		clientRpcClients := make([]*rpc.Client, len(clients))
		for i, client := range clients {
			var err error
			clientRpcClients[i], err = client.Client()
			if err != nil {
				t.Fatalf("Failed to obtain rpc client: %v", err)
			}
			t.Log("connecting client", i)
			if i != freeIdx {
				setCapacity(ctx, t, serverRpcClient, client.ID(), testCap/uint64(len(clients)))
			}
			net.Connect(client.ID(), server.ID())

			for {
				select {
				case <-ctx.Done():
					t.Fatalf("Timeout")
				default:
				}
				num, hash := getHead(ctx, t, clientRpcClients[i])
				if num == headNum && hash == headHash {
					t.Log("client", i, "synced")
					break
				}
				time.Sleep(time.Millisecond * 200)
			}
		}

		var wg sync.WaitGroup
		stop := make(chan struct{})

		reqCount := make([]uint64, len(clientRpcClients))

		// Send light requests as fast as the flow control allows.
		for i, c := range clientRpcClients {
			wg.Add(1)
			i, c := i, c // shadow the loop variables so each goroutine sees its own copy
			go func() {
				defer wg.Done()

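				// The buffered channel works as a semaphore: each client keeps
				// at most 100 requests in flight at any moment.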
				queue := make(chan struct{}, 100)
				reqCount[i] = 0
				for {
					select {
					case queue <- struct{}{}:
						select {
						case <-stop:
							return
						case <-ctx.Done():
							return
						default:
							wg.Add(1)
							go func() {
								ok := testRequest(ctx, t, c)
								wg.Done()
								<-queue
								if ok {
									count := atomic.AddUint64(&reqCount[i], 1)
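									// Freeze the client on the server side after
									// every 10000 served requests to also exercise
									// the freeze/recovery code path under load.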
									if count%10000 == 0 {
										freezeClient(ctx, t, serverRpcClient, clients[i].ID())
									}
								}
							}()
						}
					case <-stop:
						return
					case <-ctx.Done():
						return
					}
				}
			}()
		}

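		// processedSince reports the number of requests served to each client,
		// optionally as a delta against a previously taken snapshot.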
		processedSince := func(start []uint64) []uint64 {
			res := make([]uint64, len(reqCount))
			for i := range reqCount {
				res[i] = atomic.LoadUint64(&reqCount[i])
				if start != nil {
					res[i] -= start[i]
				}
			}
			return res
		}

		weights := make([]float64, len(clients))
		for c := 0; c < 5; c++ {
			setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), freeCap)
			freeIdx = rand.Intn(len(clients))
			var sum float64
			for i := range clients {
				if i == freeIdx {
					weights[i] = 0
				} else {
					weights[i] = rand.Float64()*(1-minRelCap) + minRelCap
				}
				sum += weights[i]
			}
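			// Apply the new capacities in two passes: first lower the shrinking
			// ones, then raise the growing ones, so the aggregate assigned
			// capacity never exceeds the server total during the transition.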
			for i, client := range clients {
				weights[i] *= float64(testCap-freeCap-100) / sum
				capacity := uint64(weights[i])
				if i != freeIdx && capacity < getCapacity(ctx, t, serverRpcClient, client.ID()) {
					setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
				}
			}
			setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), 0)
			for i, client := range clients {
				capacity := uint64(weights[i])
				if i != freeIdx && capacity > getCapacity(ctx, t, serverRpcClient, client.ID()) {
					setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
				}
			}
			weights[freeIdx] = float64(freeCap)
			for i := range clients {
				weights[i] /= float64(testCap)
			}

			time.Sleep(flowcontrol.DecParamDelay)
			t.Log("Starting measurement")
			t.Logf("Relative weights: %v", weights)
			start := processedSince(nil)
			for {
				select {
				case <-ctx.Done():
					t.Fatalf("Timeout")
				default:
				}

				_, _, totalCap = getCapacityInfo(ctx, t, serverRpcClient)
				if totalCap < testCap {
					t.Log("Total capacity underrun")
					close(stop)
					wg.Wait()
					return false
				}

				processed := processedSince(start)
				var avg uint64
				t.Logf("Processed: %v", processed)
				for i, p := range processed {
					processed[i] = uint64(float64(p) / weights[i])
					avg += processed[i]
				}
				avg /= uint64(len(processed))

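				// Judge the weight-normalized counts only once the average
				// exceeds 10000 requests, so that the measured ratios are
				// statistically meaningful.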
				if avg >= 10000 {
					var maxDev float64
					for _, p := range processed {
						dev := float64(int64(p-avg)) / float64(avg)
						t.Logf(" %7.4f", dev)
						if dev < 0 {
							dev = -dev
						}
						if dev > maxDev {
							maxDev = dev
						}
					}
					t.Logf("  max deviation: %f  totalCap: %d\n", maxDev, totalCap)
					if maxDev <= testTolerance {
						t.Log("success")
						break
					}
				}
				time.Sleep(time.Millisecond * 200)
			}
		}

		close(stop)
		wg.Wait()

		for i, count := range reqCount {
			t.Log("client", i, "processed", count)
		}
		return true
	}) {
		t.Log("restarting test")
	}
}

func getHead(ctx context.Context, t *testing.T, client *rpc.Client) (uint64, common.Hash) {
	res := make(map[string]interface{})
	if err := client.CallContext(ctx, &res, "eth_getBlockByNumber", "latest", false); err != nil {
		t.Fatalf("Failed to obtain head block: %v", err)
	}
	numStr, ok := res["number"].(string)
	if !ok {
		t.Fatalf("RPC block number field invalid")
	}
	num, err := hexutil.DecodeUint64(numStr)
	if err != nil {
		t.Fatalf("Failed to decode RPC block number: %v", err)
	}
	hashStr, ok := res["hash"].(string)
	if !ok {
		t.Fatalf("RPC block hash field invalid")
	}
	hash := common.HexToHash(hashStr)
	return num, hash
}

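// testRequest queries the balance of a single random address through the given
// client. Since the address is freshly random on every call, the result cannot
// be served from a local cache and each request carries a real retrieval cost.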
func testRequest(ctx context.Context, t *testing.T, client *rpc.Client) bool {
	var res string
	var addr common.Address
	rand.Read(addr[:])
	c, cancel := context.WithTimeout(ctx, time.Second*12)
	defer cancel()
	err := client.CallContext(c, &res, "eth_getBalance", addr, "latest")
	if err != nil {
		t.Log("request error:", err)
	}
	return err == nil
}

func freezeClient(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) {
	if err := server.CallContext(ctx, nil, "debug_freezeClient", clientID); err != nil {
		t.Fatalf("Failed to freeze client: %v", err)
	}
}

func setCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID, capacity uint64) {
	params := make(map[string]interface{})
	params["capacity"] = capacity
	if err := server.CallContext(ctx, nil, "les_setClientParams", []enode.ID{clientID}, []string{}, params); err != nil {
		t.Fatalf("Failed to set client capacity: %v", err)
	}
}

func getCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) uint64 {
	var res map[enode.ID]map[string]interface{}
	if err := server.CallContext(ctx, &res, "les_clientInfo", []enode.ID{clientID}, []string{}); err != nil {
		t.Fatalf("Failed to get client info: %v", err)
	}
	info, ok := res[clientID]
	if !ok {
		t.Fatalf("Missing client info")
	}
	v, ok := info["capacity"]
	if !ok {
		t.Fatalf("Missing field in client info: capacity")
	}
	vv, ok := v.(float64)
	if !ok {
		t.Fatalf("Failed to decode capacity field")
	}
	return uint64(vv)
}

func getCapacityInfo(ctx context.Context, t *testing.T, server *rpc.Client) (minCap, freeCap, totalCap uint64) {
	var res map[string]interface{}
	if err := server.CallContext(ctx, &res, "les_serverInfo"); err != nil {
		t.Fatalf("Failed to query server info: %v", err)
	}
	decode := func(s string) uint64 {
		v, ok := res[s]
		if !ok {
			t.Fatalf("Missing field in server info: %s", s)
		}
		vv, ok := v.(float64)
		if !ok {
			t.Fatalf("Failed to decode server info field: %s", s)
		}
		return uint64(vv)
	}
	minCap = decode("minimumCapacity")
	freeCap = decode("freeClientCapacity")
	totalCap = decode("totalCapacity")
	return
}

var services = adapters.Services{
	"lesclient": newLesClientService,
	"lesserver": newLesServerService,
}

func NewNetwork() (*simulations.Network, func(), error) {
	adapter, adapterTeardown, err := NewAdapter(*simAdapter, services)
	if err != nil {
		return nil, adapterTeardown, err
	}
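	// Note: the default service name is inert here, because testSim sets an
	// explicit Services list on every node config it creates.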
	defaultService := "streamer"
	net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
		ID:             "0",
		DefaultService: defaultService,
	})
	teardown := func() {
		adapterTeardown()
		net.Shutdown()
	}
	return net, teardown, nil
}

func NewAdapter(adapterType string, services adapters.Services) (adapter adapters.NodeAdapter, teardown func(), err error) {
	teardown = func() {}
	switch adapterType {
	case "sim":
		adapter = adapters.NewSimAdapter(services)
		//	case "socket":
		//		adapter = adapters.NewSocketAdapter(services)
	case "exec":
		baseDir, err0 := ioutil.TempDir("", "les-test")
		if err0 != nil {
			return nil, teardown, err0
		}
		teardown = func() { os.RemoveAll(baseDir) }
		adapter = adapters.NewExecAdapter(baseDir)
	/*case "docker":
	adapter, err = adapters.NewDockerAdapter()
	if err != nil {
		return nil, teardown, err
	}*/
	default:
		return nil, teardown, errors.New("adapter needs to be one of sim, socket, exec, docker")
	}
	return adapter, teardown, nil
}

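// testSim builds a simulated network with the given number of LES servers and
// clients, optionally assigning pre-populated data directories, starts all of
// the nodes and runs the supplied test callback, returning its verdict.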
func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []string, test func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool) bool {
	net, teardown, err := NewNetwork()
	defer teardown()
	if err != nil {
		t.Fatalf("Failed to create network: %v", err)
	}
	timeout := 1800 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	servers := make([]*simulations.Node, serverCount)
	clients := make([]*simulations.Node, clientCount)

	for i := range clients {
		clientconf := adapters.RandomNodeConfig()
		clientconf.Services = []string{"lesclient"}
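		// Data directories are only applied when the caller supplies exactly
		// one per node; otherwise each node starts with an empty database.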
		if len(clientDir) == clientCount {
			clientconf.DataDir = clientDir[i]
		}
		client, err := net.NewNodeWithConfig(clientconf)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		clients[i] = client
	}

	for i := range servers {
		serverconf := adapters.RandomNodeConfig()
		serverconf.Services = []string{"lesserver"}
		if len(serverDir) == serverCount {
			serverconf.DataDir = serverDir[i]
		}
		server, err := net.NewNodeWithConfig(serverconf)
		if err != nil {
			t.Fatalf("Failed to create server: %v", err)
		}
		servers[i] = server
	}

	for _, client := range clients {
		if err := net.Start(client.ID()); err != nil {
			t.Fatalf("Failed to start client node: %v", err)
		}
	}
	for _, server := range servers {
		if err := net.Start(server.ID()); err != nil {
			t.Fatalf("Failed to start server node: %v", err)
		}
	}

	return test(ctx, net, servers, clients)
}

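// newLesClientService configures a light-syncing Ethereum service with fake PoW
// verification, so the simulated nodes do not need to perform real mining work.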
func newLesClientService(ctx *adapters.ServiceContext) (node.Service, error) {
	config := eth.DefaultConfig
	config.SyncMode = downloader.LightSync
	config.Ethash.PowMode = ethash.ModeFake
	return New(ctx.NodeContext, &config)
}

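// newLesServerService configures a full-syncing Ethereum service and attaches a
// LES server using the test's capacity and client limit constants.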
func newLesServerService(ctx *adapters.ServiceContext) (node.Service, error) {
	config := eth.DefaultConfig
	config.SyncMode = downloader.FullSync
	config.LightServ = testServerCapacity
	config.LightPeers = testMaxClients
	ethereum, err := eth.New(ctx.NodeContext, &config)
	if err != nil {
		return nil, err
	}
	server, err := NewLesServer(ethereum, &config)
	if err != nil {
		return nil, err
	}
	ethereum.AddLesServer(server)
	return ethereum, nil
}