github.com/theQRL/go-zond@v0.1.1/les/api_test.go (about)

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package les
    18  
    19  import (
    20  	"context"
    21  	crand "crypto/rand"
    22  	"errors"
    23  	"flag"
    24  	"math/rand"
    25  	"os"
    26  	"sync"
    27  	"sync/atomic"
    28  	"testing"
    29  	"time"
    30  
    31  	"github.com/mattn/go-colorable"
    32  	"github.com/theQRL/go-zond/common"
    33  	"github.com/theQRL/go-zond/common/hexutil"
    34  	"github.com/theQRL/go-zond/les/flowcontrol"
    35  	"github.com/theQRL/go-zond/log"
    36  	"github.com/theQRL/go-zond/node"
    37  	"github.com/theQRL/go-zond/p2p/enode"
    38  	"github.com/theQRL/go-zond/p2p/simulations"
    39  	"github.com/theQRL/go-zond/p2p/simulations/adapters"
    40  	"github.com/theQRL/go-zond/rpc"
    41  	"github.com/theQRL/go-zond/zond"
    42  	"github.com/theQRL/go-zond/zond/downloader"
    43  	"github.com/theQRL/go-zond/zond/ethconfig"
    44  )
    45  
// Additional command line flags for the test binary.
var (
	// loglevel selects the minimum log level forwarded to stderr (see TestMain).
	loglevel = flag.Int("loglevel", 0, "verbosity of logs")
	// simAdapter selects how simulated nodes are run; only "sim" and "exec"
	// are currently wired up in NewAdapter.
	simAdapter = flag.String("adapter", "exec", "type of simulation: sim|socket|exec|docker")
)
    51  
    52  func TestMain(m *testing.M) {
    53  	flag.Parse()
    54  	log.PrintOrigins(true)
    55  	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
    56  	// register the Delivery service which will run as a devp2p
    57  	// protocol when using the exec adapter
    58  	adapters.RegisterLifecycles(services)
    59  	os.Exit(m.Run())
    60  }
    61  
// This test is not meant to be a part of the automatic testing process because it
// runs for a long time and also requires a large database in order to do a meaningful
// request performance test. When testServerDataDir is empty, the test is skipped.

const (
	testServerDataDir  = "" // should always be empty on the master branch
	testServerCapacity = 200 // light server capacity percentage passed to the server config
	testMaxClients     = 10  // maximum number of light peers accepted by the test server
	testTolerance      = 0.1 // maximum accepted relative deviation of processed requests
	minRelCap          = 0.2 // lower bound of the random relative weight given to priority clients
)
    73  
// TestCapacityAPI3 runs the capacity API simulation with 3 clients.
func TestCapacityAPI3(t *testing.T) {
	testCapacityAPI(t, 3)
}
    77  
// TestCapacityAPI6 runs the capacity API simulation with 6 clients.
func TestCapacityAPI6(t *testing.T) {
	testCapacityAPI(t, 6)
}
    81  
// TestCapacityAPI10 runs the capacity API simulation with 10 clients.
func TestCapacityAPI10(t *testing.T) {
	testCapacityAPI(t, 10)
}
    85  
    86  // testCapacityAPI runs an end-to-end simulation test connecting one server with
    87  // a given number of clients. It sets different priority capacities to all clients
    88  // except a randomly selected one which runs in free client mode. All clients send
    89  // similar requests at the maximum allowed rate and the test verifies whether the
    90  // ratio of processed requests is close enough to the ratio of assigned capacities.
    91  // Running multiple rounds with different settings ensures that changing capacity
    92  // while connected and going back and forth between free and priority mode with
    93  // the supplied API calls is also thoroughly tested.
// testCapacityAPI runs an end-to-end simulation test connecting one server with
// a given number of clients. It sets different priority capacities to all clients
// except a randomly selected one which runs in free client mode. All clients send
// similar requests at the maximum allowed rate and the test verifies whether the
// ratio of processed requests is close enough to the ratio of assigned capacities.
// Running multiple rounds with different settings ensures that changing capacity
// while connected and going back and forth between free and priority mode with
// the supplied API calls is also thoroughly tested.
func testCapacityAPI(t *testing.T, clientCount int) {
	// Skip test if no data dir specified
	if testServerDataDir == "" {
		return
	}
	// testSim forwards the callback's boolean result; returning false (total
	// capacity underrun) restarts the whole simulation until a run succeeds.
	for !testSim(t, 1, clientCount, []string{testServerDataDir}, nil, func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool {
		if len(servers) != 1 {
			t.Fatalf("Invalid number of servers: %d", len(servers))
		}
		server := servers[0]

		serverRpcClient, err := server.Client()
		if err != nil {
			t.Fatalf("Failed to obtain rpc client: %v", err)
		}
		// Query the server's head and capacity limits; the test uses 3/4 of
		// the advertised total capacity so there is headroom left.
		headNum, headHash := getHead(ctx, t, serverRpcClient)
		minCap, totalCap := getCapacityInfo(ctx, t, serverRpcClient)
		testCap := totalCap * 3 / 4
		t.Logf("Server testCap: %d  minCap: %d  head number: %d  head hash: %064x\n", testCap, minCap, headNum, headHash)
		// The smallest priority share handed out below is minRelCap relative
		// to the others; it must still be at least the server's minimum.
		reqMinCap := uint64(float64(testCap) * minRelCap / (minRelCap + float64(len(clients)-1)))
		if minCap > reqMinCap {
			t.Fatalf("Minimum client capacity (%d) bigger than required minimum for this test (%d)", minCap, reqMinCap)
		}
		// One randomly chosen client runs in free (non-priority) mode.
		freeIdx := rand.Intn(len(clients))

		// Connect every client to the server and wait until each one has
		// synced up to the server's head block.
		clientRpcClients := make([]*rpc.Client, len(clients))
		for i, client := range clients {
			var err error
			clientRpcClients[i], err = client.Client()
			if err != nil {
				t.Fatalf("Failed to obtain rpc client: %v", err)
			}
			t.Log("connecting client", i)
			if i != freeIdx {
				setCapacity(ctx, t, serverRpcClient, client.ID(), testCap/uint64(len(clients)))
			}
			net.Connect(client.ID(), server.ID())

			for {
				select {
				case <-ctx.Done():
					t.Fatalf("Timeout")
				default:
				}
				num, hash := getHead(ctx, t, clientRpcClients[i])
				if num == headNum && hash == headHash {
					t.Log("client", i, "synced")
					break
				}
				time.Sleep(time.Millisecond * 200)
			}
		}

		var wg sync.WaitGroup
		stop := make(chan struct{})

		// Per-client counters of successfully processed requests, updated by
		// the request goroutines and read by processedSince below.
		reqCount := make([]atomic.Uint64, len(clientRpcClients))

		// Send light request like crazy.
		for i, c := range clientRpcClients {
			wg.Add(1)
			i, c := i, c // capture loop variables for the goroutine (pre-Go 1.22 semantics)
			go func() {
				defer wg.Done()

				// queue bounds the number of in-flight requests per client to 100.
				queue := make(chan struct{}, 100)
				reqCount[i].Store(0)
				for {
					select {
					case queue <- struct{}{}:
						select {
						case <-stop:
							return
						case <-ctx.Done():
							return
						default:
							wg.Add(1)
							go func() {
								ok := testRequest(ctx, t, c)
								wg.Done()
								<-queue
								if ok {
									// Periodically freeze the client to also exercise
									// the server's freeze/unfreeze path under load.
									if reqCount[i].Add(1)%10000 == 0 {
										freezeClient(ctx, t, serverRpcClient, clients[i].ID())
									}
								}
							}()
						}
					case <-stop:
						return
					case <-ctx.Done():
						return
					}
				}
			}()
		}

		// processedSince returns the per-client request counts, optionally as
		// a delta against a previously taken snapshot.
		processedSince := func(start []uint64) []uint64 {
			res := make([]uint64, len(reqCount))
			for i := range reqCount {
				res[i] = reqCount[i].Load()
				if start != nil {
					res[i] -= start[i]
				}
			}
			return res
		}

		// Run five measurement rounds, each with a freshly picked free client
		// and freshly randomized capacity weights.
		weights := make([]float64, len(clients))
		for c := 0; c < 5; c++ {
			// Give the previous free client a minimal capacity before
			// reassigning the free role.
			setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), minCap)
			freeIdx = rand.Intn(len(clients))
			var sum float64
			for i := range clients {
				if i == freeIdx {
					weights[i] = 0
				} else {
					weights[i] = rand.Float64()*(1-minRelCap) + minRelCap
				}
				sum += weights[i]
			}
			// Scale weights to absolute capacities. Decreases are applied
			// first, then the free client is zeroed, then increases — so the
			// total assigned capacity never exceeds the limit in between.
			for i, client := range clients {
				weights[i] *= float64(testCap-minCap-100) / sum
				capacity := uint64(weights[i])
				if i != freeIdx && capacity < getCapacity(ctx, t, serverRpcClient, client.ID()) {
					setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
				}
			}
			setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), 0)
			for i, client := range clients {
				capacity := uint64(weights[i])
				if i != freeIdx && capacity > getCapacity(ctx, t, serverRpcClient, client.ID()) {
					setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
				}
			}
			// Normalize weights to relative shares of testCap; the free
			// client's expected share is the server minimum.
			weights[freeIdx] = float64(minCap)
			for i := range clients {
				weights[i] /= float64(testCap)
			}

			// Let the server's flow control parameters settle before measuring.
			time.Sleep(flowcontrol.DecParamDelay)
			t.Log("Starting measurement")
			t.Logf("Relative weights:")
			for i := range clients {
				t.Logf("  %f", weights[i])
			}
			t.Log()
			start := processedSince(nil)
			for {
				select {
				case <-ctx.Done():
					t.Fatalf("Timeout")
				default:
				}

				// Abort (and restart the whole test) if the server's total
				// capacity dropped below what this run was calibrated for.
				_, totalCap = getCapacityInfo(ctx, t, serverRpcClient)
				if totalCap < testCap {
					t.Log("Total capacity underrun")
					close(stop)
					wg.Wait()
					return false
				}

				// Normalize each client's processed count by its weight and
				// check the maximum deviation once enough samples exist.
				processed := processedSince(start)
				var avg uint64
				t.Logf("Processed")
				for i, p := range processed {
					t.Logf(" %d", p)
					processed[i] = uint64(float64(p) / weights[i])
					avg += processed[i]
				}
				avg /= uint64(len(processed))

				if avg >= 10000 {
					var maxDev float64
					for _, p := range processed {
						dev := float64(int64(p-avg)) / float64(avg)
						t.Logf(" %7.4f", dev)
						if dev < 0 {
							dev = -dev
						}
						if dev > maxDev {
							maxDev = dev
						}
					}
					t.Logf("  max deviation: %f  totalCap: %d\n", maxDev, totalCap)
					if maxDev <= testTolerance {
						t.Log("success")
						break
					}
				} else {
					t.Log()
				}
				time.Sleep(time.Millisecond * 200)
			}
		}

		// Stop all request senders and wait for every goroutine to exit.
		close(stop)
		wg.Wait()

		for i := range reqCount {
			t.Log("client", i, "processed", reqCount[i].Load())
		}
		return true
	}) {
		t.Log("restarting test")
	}
}
   302  
   303  func getHead(ctx context.Context, t *testing.T, client *rpc.Client) (uint64, common.Hash) {
   304  	res := make(map[string]interface{})
   305  	if err := client.CallContext(ctx, &res, "zond_getBlockByNumber", "latest", false); err != nil {
   306  		t.Fatalf("Failed to obtain head block: %v", err)
   307  	}
   308  	numStr, ok := res["number"].(string)
   309  	if !ok {
   310  		t.Fatalf("RPC block number field invalid")
   311  	}
   312  	num, err := hexutil.DecodeUint64(numStr)
   313  	if err != nil {
   314  		t.Fatalf("Failed to decode RPC block number: %v", err)
   315  	}
   316  	hashStr, ok := res["hash"].(string)
   317  	if !ok {
   318  		t.Fatalf("RPC block number field invalid")
   319  	}
   320  	hash := common.HexToHash(hashStr)
   321  	return num, hash
   322  }
   323  
   324  func testRequest(ctx context.Context, t *testing.T, client *rpc.Client) bool {
   325  	var res string
   326  	var addr common.Address
   327  	crand.Read(addr[:])
   328  	c, cancel := context.WithTimeout(ctx, time.Second*12)
   329  	defer cancel()
   330  	err := client.CallContext(c, &res, "zond_getBalance", addr, "latest")
   331  	if err != nil {
   332  		t.Log("request error:", err)
   333  	}
   334  	return err == nil
   335  }
   336  
   337  func freezeClient(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) {
   338  	if err := server.CallContext(ctx, nil, "debug_freezeClient", clientID); err != nil {
   339  		t.Fatalf("Failed to freeze client: %v", err)
   340  	}
   341  }
   342  
   343  func setCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID, cap uint64) {
   344  	params := make(map[string]interface{})
   345  	params["capacity"] = cap
   346  	if err := server.CallContext(ctx, nil, "les_setClientParams", []enode.ID{clientID}, []string{}, params); err != nil {
   347  		t.Fatalf("Failed to set client capacity: %v", err)
   348  	}
   349  }
   350  
   351  func getCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) uint64 {
   352  	var res map[enode.ID]map[string]interface{}
   353  	if err := server.CallContext(ctx, &res, "les_clientInfo", []enode.ID{clientID}, []string{}); err != nil {
   354  		t.Fatalf("Failed to get client info: %v", err)
   355  	}
   356  	info, ok := res[clientID]
   357  	if !ok {
   358  		t.Fatalf("Missing client info")
   359  	}
   360  	v, ok := info["capacity"]
   361  	if !ok {
   362  		t.Fatalf("Missing field in client info: capacity")
   363  	}
   364  	vv, ok := v.(float64)
   365  	if !ok {
   366  		t.Fatalf("Failed to decode capacity field")
   367  	}
   368  	return uint64(vv)
   369  }
   370  
   371  func getCapacityInfo(ctx context.Context, t *testing.T, server *rpc.Client) (minCap, totalCap uint64) {
   372  	var res map[string]interface{}
   373  	if err := server.CallContext(ctx, &res, "les_serverInfo"); err != nil {
   374  		t.Fatalf("Failed to query server info: %v", err)
   375  	}
   376  	decode := func(s string) uint64 {
   377  		v, ok := res[s]
   378  		if !ok {
   379  			t.Fatalf("Missing field in server info: %s", s)
   380  		}
   381  		vv, ok := v.(float64)
   382  		if !ok {
   383  			t.Fatalf("Failed to decode server info field: %s", s)
   384  		}
   385  		return uint64(vv)
   386  	}
   387  	minCap = decode("minimumCapacity")
   388  	totalCap = decode("totalCapacity")
   389  	return
   390  }
   391  
// services maps the lifecycle names referenced in node configurations to
// their constructors; it is registered with the adapter framework in TestMain.
var services = adapters.LifecycleConstructors{
	"lesclient": newLesClientService,
	"lesserver": newLesServerService,
}
   396  
   397  func NewNetwork() (*simulations.Network, func(), error) {
   398  	adapter, adapterTeardown, err := NewAdapter(*simAdapter, services)
   399  	if err != nil {
   400  		return nil, adapterTeardown, err
   401  	}
   402  	defaultService := "streamer"
   403  	net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
   404  		ID:             "0",
   405  		DefaultService: defaultService,
   406  	})
   407  	teardown := func() {
   408  		adapterTeardown()
   409  		net.Shutdown()
   410  	}
   411  	return net, teardown, nil
   412  }
   413  
   414  func NewAdapter(adapterType string, services adapters.LifecycleConstructors) (adapter adapters.NodeAdapter, teardown func(), err error) {
   415  	teardown = func() {}
   416  	switch adapterType {
   417  	case "sim":
   418  		adapter = adapters.NewSimAdapter(services)
   419  		//	case "socket":
   420  		//		adapter = adapters.NewSocketAdapter(services)
   421  	case "exec":
   422  		baseDir, err0 := os.MkdirTemp("", "les-test")
   423  		if err0 != nil {
   424  			return nil, teardown, err0
   425  		}
   426  		teardown = func() { os.RemoveAll(baseDir) }
   427  		adapter = adapters.NewExecAdapter(baseDir)
   428  	/*case "docker":
   429  	adapter, err = adapters.NewDockerAdapter()
   430  	if err != nil {
   431  		return nil, teardown, err
   432  	}*/
   433  	default:
   434  		return nil, teardown, errors.New("adapter needs to be one of sim, socket, exec, docker")
   435  	}
   436  	return adapter, teardown, nil
   437  }
   438  
   439  func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []string, test func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool) bool {
   440  	net, teardown, err := NewNetwork()
   441  	defer teardown()
   442  	if err != nil {
   443  		t.Fatalf("Failed to create network: %v", err)
   444  	}
   445  	timeout := 1800 * time.Second
   446  	ctx, cancel := context.WithTimeout(context.Background(), timeout)
   447  	defer cancel()
   448  
   449  	servers := make([]*simulations.Node, serverCount)
   450  	clients := make([]*simulations.Node, clientCount)
   451  
   452  	for i := range clients {
   453  		clientconf := adapters.RandomNodeConfig()
   454  		clientconf.Lifecycles = []string{"lesclient"}
   455  		if len(clientDir) == clientCount {
   456  			clientconf.DataDir = clientDir[i]
   457  		}
   458  		client, err := net.NewNodeWithConfig(clientconf)
   459  		if err != nil {
   460  			t.Fatalf("Failed to create client: %v", err)
   461  		}
   462  		clients[i] = client
   463  	}
   464  
   465  	for i := range servers {
   466  		serverconf := adapters.RandomNodeConfig()
   467  		serverconf.Lifecycles = []string{"lesserver"}
   468  		if len(serverDir) == serverCount {
   469  			serverconf.DataDir = serverDir[i]
   470  		}
   471  		server, err := net.NewNodeWithConfig(serverconf)
   472  		if err != nil {
   473  			t.Fatalf("Failed to create server: %v", err)
   474  		}
   475  		servers[i] = server
   476  	}
   477  
   478  	for _, client := range clients {
   479  		if err := net.Start(client.ID()); err != nil {
   480  			t.Fatalf("Failed to start client node: %v", err)
   481  		}
   482  	}
   483  	for _, server := range servers {
   484  		if err := net.Start(server.ID()); err != nil {
   485  			t.Fatalf("Failed to start server node: %v", err)
   486  		}
   487  	}
   488  
   489  	return test(ctx, net, servers, clients)
   490  }
   491  
   492  func newLesClientService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
   493  	config := ethconfig.Defaults
   494  	config.SyncMode = downloader.LightSync
   495  	return New(stack, &config)
   496  }
   497  
   498  func newLesServerService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
   499  	config := ethconfig.Defaults
   500  	config.SyncMode = downloader.FullSync
   501  	config.LightServ = testServerCapacity
   502  	config.LightPeers = testMaxClients
   503  	ethereum, err := zond.New(stack, &config)
   504  	if err != nil {
   505  		return nil, err
   506  	}
   507  	_, err = NewLesServer(stack, ethereum, &config)
   508  	if err != nil {
   509  		return nil, err
   510  	}
   511  	return ethereum, nil
   512  }