github.com/bloxroute-labs/bor@v0.1.4/les/api_test.go (about)

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package les
    18  
    19  import (
    20  	"context"
    21  	"errors"
    22  	"flag"
    23  	"fmt"
    24  	"io/ioutil"
    25  	"math/rand"
    26  	"os"
    27  	"sync"
    28  	"sync/atomic"
    29  	"testing"
    30  	"time"
    31  
    32  	"github.com/maticnetwork/bor/common"
    33  	"github.com/maticnetwork/bor/common/hexutil"
    34  	"github.com/maticnetwork/bor/consensus/ethash"
    35  	"github.com/maticnetwork/bor/eth"
    36  	"github.com/maticnetwork/bor/eth/downloader"
    37  	"github.com/maticnetwork/bor/les/flowcontrol"
    38  	"github.com/maticnetwork/bor/log"
    39  	"github.com/maticnetwork/bor/node"
    40  	"github.com/maticnetwork/bor/p2p/enode"
    41  	"github.com/maticnetwork/bor/p2p/simulations"
    42  	"github.com/maticnetwork/bor/p2p/simulations/adapters"
    43  	"github.com/maticnetwork/bor/rpc"
    44  	colorable "github.com/mattn/go-colorable"
    45  )
    46  
    47  /*
    48  This test is not meant to be a part of the automatic testing process because it
    49  runs for a long time and also requires a large database in order to do a meaningful
    50  request performance test. When testServerDataDir is empty, the test is skipped.
    51  */
    52  
// Tunables for the capacity API simulation test.
const (
	testServerDataDir  = "" // should always be empty on the master branch
	testServerCapacity = 200 // assigned to config.LightServ for the test server
	testMaxClients     = 10  // assigned to config.LightPeers for the test server
	testTolerance      = 0.1 // maximum accepted relative deviation of weighted request throughput
	minRelCap          = 0.2 // lower bound of the random relative weight given to priority clients
)
    60  
// TestCapacityAPI3 runs the capacity API simulation with 3 clients.
func TestCapacityAPI3(t *testing.T) {
	testCapacityAPI(t, 3)
}
    64  
// TestCapacityAPI6 runs the capacity API simulation with 6 clients.
func TestCapacityAPI6(t *testing.T) {
	testCapacityAPI(t, 6)
}
    68  
// TestCapacityAPI10 runs the capacity API simulation with 10 clients.
func TestCapacityAPI10(t *testing.T) {
	testCapacityAPI(t, 10)
}
    72  
    73  // testCapacityAPI runs an end-to-end simulation test connecting one server with
    74  // a given number of clients. It sets different priority capacities to all clients
    75  // except a randomly selected one which runs in free client mode. All clients send
    76  // similar requests at the maximum allowed rate and the test verifies whether the
    77  // ratio of processed requests is close enough to the ratio of assigned capacities.
    78  // Running multiple rounds with different settings ensures that changing capacity
    79  // while connected and going back and forth between free and priority mode with
    80  // the supplied API calls is also thoroughly tested.
    81  func testCapacityAPI(t *testing.T, clientCount int) {
    82  	if testServerDataDir == "" {
    83  		// Skip test if no data dir specified
    84  		return
    85  	}
    86  
    87  	for !testSim(t, 1, clientCount, []string{testServerDataDir}, nil, func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool {
    88  		if len(servers) != 1 {
    89  			t.Fatalf("Invalid number of servers: %d", len(servers))
    90  		}
    91  		server := servers[0]
    92  
    93  		clientRpcClients := make([]*rpc.Client, len(clients))
    94  
    95  		serverRpcClient, err := server.Client()
    96  		if err != nil {
    97  			t.Fatalf("Failed to obtain rpc client: %v", err)
    98  		}
    99  		headNum, headHash := getHead(ctx, t, serverRpcClient)
   100  		totalCap := getTotalCap(ctx, t, serverRpcClient)
   101  		minCap := getMinCap(ctx, t, serverRpcClient)
   102  		testCap := totalCap * 3 / 4
   103  		fmt.Printf("Server testCap: %d  minCap: %d  head number: %d  head hash: %064x\n", testCap, minCap, headNum, headHash)
   104  		reqMinCap := uint64(float64(testCap) * minRelCap / (minRelCap + float64(len(clients)-1)))
   105  		if minCap > reqMinCap {
   106  			t.Fatalf("Minimum client capacity (%d) bigger than required minimum for this test (%d)", minCap, reqMinCap)
   107  		}
   108  
   109  		freeIdx := rand.Intn(len(clients))
   110  		freeCap := getFreeCap(ctx, t, serverRpcClient)
   111  
   112  		for i, client := range clients {
   113  			var err error
   114  			clientRpcClients[i], err = client.Client()
   115  			if err != nil {
   116  				t.Fatalf("Failed to obtain rpc client: %v", err)
   117  			}
   118  
   119  			fmt.Println("connecting client", i)
   120  			if i != freeIdx {
   121  				setCapacity(ctx, t, serverRpcClient, client.ID(), testCap/uint64(len(clients)))
   122  			}
   123  			net.Connect(client.ID(), server.ID())
   124  
   125  			for {
   126  				select {
   127  				case <-ctx.Done():
   128  					t.Fatalf("Timeout")
   129  				default:
   130  				}
   131  				num, hash := getHead(ctx, t, clientRpcClients[i])
   132  				if num == headNum && hash == headHash {
   133  					fmt.Println("client", i, "synced")
   134  					break
   135  				}
   136  				time.Sleep(time.Millisecond * 200)
   137  			}
   138  		}
   139  
   140  		var wg sync.WaitGroup
   141  		stop := make(chan struct{})
   142  
   143  		reqCount := make([]uint64, len(clientRpcClients))
   144  
   145  		for i, c := range clientRpcClients {
   146  			wg.Add(1)
   147  			i, c := i, c
   148  			go func() {
   149  				queue := make(chan struct{}, 100)
   150  				var count uint64
   151  				for {
   152  					select {
   153  					case queue <- struct{}{}:
   154  						select {
   155  						case <-stop:
   156  							wg.Done()
   157  							return
   158  						case <-ctx.Done():
   159  							wg.Done()
   160  							return
   161  						default:
   162  							wg.Add(1)
   163  							go func() {
   164  								ok := testRequest(ctx, t, c)
   165  								wg.Done()
   166  								<-queue
   167  								if ok {
   168  									count++
   169  									atomic.StoreUint64(&reqCount[i], count)
   170  								}
   171  							}()
   172  						}
   173  					case <-stop:
   174  						wg.Done()
   175  						return
   176  					case <-ctx.Done():
   177  						wg.Done()
   178  						return
   179  					}
   180  				}
   181  			}()
   182  		}
   183  
   184  		processedSince := func(start []uint64) []uint64 {
   185  			res := make([]uint64, len(reqCount))
   186  			for i := range reqCount {
   187  				res[i] = atomic.LoadUint64(&reqCount[i])
   188  				if start != nil {
   189  					res[i] -= start[i]
   190  				}
   191  			}
   192  			return res
   193  		}
   194  
   195  		weights := make([]float64, len(clients))
   196  		for c := 0; c < 5; c++ {
   197  			setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), freeCap)
   198  			freeIdx = rand.Intn(len(clients))
   199  			var sum float64
   200  			for i := range clients {
   201  				if i == freeIdx {
   202  					weights[i] = 0
   203  				} else {
   204  					weights[i] = rand.Float64()*(1-minRelCap) + minRelCap
   205  				}
   206  				sum += weights[i]
   207  			}
   208  			for i, client := range clients {
   209  				weights[i] *= float64(testCap-freeCap-100) / sum
   210  				capacity := uint64(weights[i])
   211  				if i != freeIdx && capacity < getCapacity(ctx, t, serverRpcClient, client.ID()) {
   212  					setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
   213  				}
   214  			}
   215  			setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), 0)
   216  			for i, client := range clients {
   217  				capacity := uint64(weights[i])
   218  				if i != freeIdx && capacity > getCapacity(ctx, t, serverRpcClient, client.ID()) {
   219  					setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
   220  				}
   221  			}
   222  			weights[freeIdx] = float64(freeCap)
   223  			for i := range clients {
   224  				weights[i] /= float64(testCap)
   225  			}
   226  
   227  			time.Sleep(flowcontrol.DecParamDelay)
   228  			fmt.Println("Starting measurement")
   229  			fmt.Printf("Relative weights:")
   230  			for i := range clients {
   231  				fmt.Printf("  %f", weights[i])
   232  			}
   233  			fmt.Println()
   234  			start := processedSince(nil)
   235  			for {
   236  				select {
   237  				case <-ctx.Done():
   238  					t.Fatalf("Timeout")
   239  				default:
   240  				}
   241  
   242  				totalCap = getTotalCap(ctx, t, serverRpcClient)
   243  				if totalCap < testCap {
   244  					fmt.Println("Total capacity underrun")
   245  					close(stop)
   246  					wg.Wait()
   247  					return false
   248  				}
   249  
   250  				processed := processedSince(start)
   251  				var avg uint64
   252  				fmt.Printf("Processed")
   253  				for i, p := range processed {
   254  					fmt.Printf(" %d", p)
   255  					processed[i] = uint64(float64(p) / weights[i])
   256  					avg += processed[i]
   257  				}
   258  				avg /= uint64(len(processed))
   259  
   260  				if avg >= 10000 {
   261  					var maxDev float64
   262  					for _, p := range processed {
   263  						dev := float64(int64(p-avg)) / float64(avg)
   264  						fmt.Printf(" %7.4f", dev)
   265  						if dev < 0 {
   266  							dev = -dev
   267  						}
   268  						if dev > maxDev {
   269  							maxDev = dev
   270  						}
   271  					}
   272  					fmt.Printf("  max deviation: %f  totalCap: %d\n", maxDev, totalCap)
   273  					if maxDev <= testTolerance {
   274  						fmt.Println("success")
   275  						break
   276  					}
   277  				} else {
   278  					fmt.Println()
   279  				}
   280  				time.Sleep(time.Millisecond * 200)
   281  			}
   282  		}
   283  
   284  		close(stop)
   285  		wg.Wait()
   286  
   287  		for i, count := range reqCount {
   288  			fmt.Println("client", i, "processed", count)
   289  		}
   290  		return true
   291  	}) {
   292  		fmt.Println("restarting test")
   293  	}
   294  }
   295  
   296  func getHead(ctx context.Context, t *testing.T, client *rpc.Client) (uint64, common.Hash) {
   297  	res := make(map[string]interface{})
   298  	if err := client.CallContext(ctx, &res, "eth_getBlockByNumber", "latest", false); err != nil {
   299  		t.Fatalf("Failed to obtain head block: %v", err)
   300  	}
   301  	numStr, ok := res["number"].(string)
   302  	if !ok {
   303  		t.Fatalf("RPC block number field invalid")
   304  	}
   305  	num, err := hexutil.DecodeUint64(numStr)
   306  	if err != nil {
   307  		t.Fatalf("Failed to decode RPC block number: %v", err)
   308  	}
   309  	hashStr, ok := res["hash"].(string)
   310  	if !ok {
   311  		t.Fatalf("RPC block number field invalid")
   312  	}
   313  	hash := common.HexToHash(hashStr)
   314  	return num, hash
   315  }
   316  
   317  func testRequest(ctx context.Context, t *testing.T, client *rpc.Client) bool {
   318  	//res := make(map[string]interface{})
   319  	var res string
   320  	var addr common.Address
   321  	rand.Read(addr[:])
   322  	c, _ := context.WithTimeout(ctx, time.Second*12)
   323  	//	if err := client.CallContext(ctx, &res, "eth_getProof", addr, nil, "latest"); err != nil {
   324  	err := client.CallContext(c, &res, "eth_getBalance", addr, "latest")
   325  	if err != nil {
   326  		fmt.Println("request error:", err)
   327  	}
   328  	return err == nil
   329  }
   330  
   331  func setCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID, cap uint64) {
   332  	if err := server.CallContext(ctx, nil, "les_setClientCapacity", clientID, cap); err != nil {
   333  		t.Fatalf("Failed to set client capacity: %v", err)
   334  	}
   335  }
   336  
   337  func getCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) uint64 {
   338  	var s string
   339  	if err := server.CallContext(ctx, &s, "les_getClientCapacity", clientID); err != nil {
   340  		t.Fatalf("Failed to get client capacity: %v", err)
   341  	}
   342  	cap, err := hexutil.DecodeUint64(s)
   343  	if err != nil {
   344  		t.Fatalf("Failed to decode client capacity: %v", err)
   345  	}
   346  	return cap
   347  }
   348  
   349  func getTotalCap(ctx context.Context, t *testing.T, server *rpc.Client) uint64 {
   350  	var s string
   351  	if err := server.CallContext(ctx, &s, "les_totalCapacity"); err != nil {
   352  		t.Fatalf("Failed to query total capacity: %v", err)
   353  	}
   354  	total, err := hexutil.DecodeUint64(s)
   355  	if err != nil {
   356  		t.Fatalf("Failed to decode total capacity: %v", err)
   357  	}
   358  	return total
   359  }
   360  
   361  func getMinCap(ctx context.Context, t *testing.T, server *rpc.Client) uint64 {
   362  	var s string
   363  	if err := server.CallContext(ctx, &s, "les_minimumCapacity"); err != nil {
   364  		t.Fatalf("Failed to query minimum capacity: %v", err)
   365  	}
   366  	min, err := hexutil.DecodeUint64(s)
   367  	if err != nil {
   368  		t.Fatalf("Failed to decode minimum capacity: %v", err)
   369  	}
   370  	return min
   371  }
   372  
   373  func getFreeCap(ctx context.Context, t *testing.T, server *rpc.Client) uint64 {
   374  	var s string
   375  	if err := server.CallContext(ctx, &s, "les_freeClientCapacity"); err != nil {
   376  		t.Fatalf("Failed to query free client capacity: %v", err)
   377  	}
   378  	free, err := hexutil.DecodeUint64(s)
   379  	if err != nil {
   380  		t.Fatalf("Failed to decode free client capacity: %v", err)
   381  	}
   382  	return free
   383  }
   384  
// init parses the custom command line flags, registers the simulation
// services used by the exec adapter and configures test logging.
func init() {
	flag.Parse()
	// register the Delivery service which will run as a devp2p
	// protocol when using the exec adapter
	adapters.RegisterServices(services)

	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}
   394  
// Command line flags controlling the simulation environment.
var (
	adapter  = flag.String("adapter", "exec", "type of simulation: sim|socket|exec|docker")
	loglevel = flag.Int("loglevel", 0, "verbosity of logs")
	nodes    = flag.Int("nodes", 0, "number of nodes") // NOTE(review): not referenced in this file — confirm whether still needed
)
   400  
// services maps simulation service names to their constructors; the names
// match the Services entries set on node configs in testSim.
var services = adapters.Services{
	"lesclient": newLesClientService,
	"lesserver": newLesServerService,
}
   405  
   406  func NewNetwork() (*simulations.Network, func(), error) {
   407  	adapter, adapterTeardown, err := NewAdapter(*adapter, services)
   408  	if err != nil {
   409  		return nil, adapterTeardown, err
   410  	}
   411  	defaultService := "streamer"
   412  	net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
   413  		ID:             "0",
   414  		DefaultService: defaultService,
   415  	})
   416  	teardown := func() {
   417  		adapterTeardown()
   418  		net.Shutdown()
   419  	}
   420  
   421  	return net, teardown, nil
   422  }
   423  
   424  func NewAdapter(adapterType string, services adapters.Services) (adapter adapters.NodeAdapter, teardown func(), err error) {
   425  	teardown = func() {}
   426  	switch adapterType {
   427  	case "sim":
   428  		adapter = adapters.NewSimAdapter(services)
   429  		//	case "socket":
   430  		//		adapter = adapters.NewSocketAdapter(services)
   431  	case "exec":
   432  		baseDir, err0 := ioutil.TempDir("", "les-test")
   433  		if err0 != nil {
   434  			return nil, teardown, err0
   435  		}
   436  		teardown = func() { os.RemoveAll(baseDir) }
   437  		adapter = adapters.NewExecAdapter(baseDir)
   438  	/*case "docker":
   439  	adapter, err = adapters.NewDockerAdapter()
   440  	if err != nil {
   441  		return nil, teardown, err
   442  	}*/
   443  	default:
   444  		return nil, teardown, errors.New("adapter needs to be one of sim, socket, exec, docker")
   445  	}
   446  	return adapter, teardown, nil
   447  }
   448  
   449  func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []string, test func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool) bool {
   450  	net, teardown, err := NewNetwork()
   451  	defer teardown()
   452  	if err != nil {
   453  		t.Fatalf("Failed to create network: %v", err)
   454  	}
   455  	timeout := 1800 * time.Second
   456  	ctx, cancel := context.WithTimeout(context.Background(), timeout)
   457  	defer cancel()
   458  
   459  	servers := make([]*simulations.Node, serverCount)
   460  	clients := make([]*simulations.Node, clientCount)
   461  
   462  	for i := range clients {
   463  		clientconf := adapters.RandomNodeConfig()
   464  		clientconf.Services = []string{"lesclient"}
   465  		if len(clientDir) == clientCount {
   466  			clientconf.DataDir = clientDir[i]
   467  		}
   468  		client, err := net.NewNodeWithConfig(clientconf)
   469  		if err != nil {
   470  			t.Fatalf("Failed to create client: %v", err)
   471  		}
   472  		clients[i] = client
   473  	}
   474  
   475  	for i := range servers {
   476  		serverconf := adapters.RandomNodeConfig()
   477  		serverconf.Services = []string{"lesserver"}
   478  		if len(serverDir) == serverCount {
   479  			serverconf.DataDir = serverDir[i]
   480  		}
   481  		server, err := net.NewNodeWithConfig(serverconf)
   482  		if err != nil {
   483  			t.Fatalf("Failed to create server: %v", err)
   484  		}
   485  		servers[i] = server
   486  	}
   487  
   488  	for _, client := range clients {
   489  		if err := net.Start(client.ID()); err != nil {
   490  			t.Fatalf("Failed to start client node: %v", err)
   491  		}
   492  	}
   493  	for _, server := range servers {
   494  		if err := net.Start(server.ID()); err != nil {
   495  			t.Fatalf("Failed to start server node: %v", err)
   496  		}
   497  	}
   498  
   499  	return test(ctx, net, servers, clients)
   500  }
   501  
   502  func newLesClientService(ctx *adapters.ServiceContext) (node.Service, error) {
   503  	config := eth.DefaultConfig
   504  	config.SyncMode = downloader.LightSync
   505  	config.Ethash.PowMode = ethash.ModeFake
   506  	return New(ctx.NodeContext, &config)
   507  }
   508  
   509  func newLesServerService(ctx *adapters.ServiceContext) (node.Service, error) {
   510  	config := eth.DefaultConfig
   511  	config.SyncMode = downloader.FullSync
   512  	config.LightServ = testServerCapacity
   513  	config.LightPeers = testMaxClients
   514  	ethereum, err := eth.New(ctx.NodeContext, &config)
   515  	if err != nil {
   516  		return nil, err
   517  	}
   518  
   519  	server, err := NewLesServer(ethereum, &config)
   520  	if err != nil {
   521  		return nil, err
   522  	}
   523  	ethereum.AddLesServer(server)
   524  	return ethereum, nil
   525  }