github.com/tirogen/go-ethereum@v1.10.12-0.20221226051715-250cfede41b6/les/flowcontrol/manager_test.go (about)

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package flowcontrol
    18  
    19  import (
    20  	"math/rand"
    21  	"testing"
    22  	"time"
    23  
    24  	"github.com/tirogen/go-ethereum/common/mclock"
    25  )
    26  
// testNode tracks the client-side state of one simulated request sender
// connected to the ClientManager under test.
type testNode struct {
	node               *ClientNode    // flow control client state registered with the manager
	bufLimit, capacity uint64         // assigned as ServerParams.BufLimit and ServerParams.MinRecharge
	waitUntil          mclock.AbsTime // earliest simulated time the next request may be sent
	index, totalCost   uint64         // running request id and sum of realized request costs
}
    33  
const (
	testMaxCost = 1000000 // announced (maximum) cost of every simulated request
	testLength  = 100000  // number of one-millisecond simulation steps per test run
)
    38  
    39  // testConstantTotalCapacity simulates multiple request sender nodes and verifies
    40  // whether the total amount of served requests matches the expected value based on
    41  // the total capacity and the duration of the test.
    42  // Some nodes are sending requests occasionally so that their buffer should regularly
    43  // reach the maximum while other nodes (the "max capacity nodes") are sending at the
    44  // maximum permitted rate. The max capacity nodes are changed multiple times during
    45  // a single test.
    46  func TestConstantTotalCapacity(t *testing.T) {
    47  	testConstantTotalCapacity(t, 10, 1, 0)
    48  	testConstantTotalCapacity(t, 10, 1, 1)
    49  	testConstantTotalCapacity(t, 30, 1, 0)
    50  	testConstantTotalCapacity(t, 30, 2, 3)
    51  	testConstantTotalCapacity(t, 100, 1, 0)
    52  	testConstantTotalCapacity(t, 100, 3, 5)
    53  	testConstantTotalCapacity(t, 100, 5, 10)
    54  }
    55  
    56  func testConstantTotalCapacity(t *testing.T, nodeCount, maxCapacityNodes, randomSend int) {
    57  	clock := &mclock.Simulated{}
    58  	nodes := make([]*testNode, nodeCount)
    59  	var totalCapacity uint64
    60  	for i := range nodes {
    61  		nodes[i] = &testNode{capacity: uint64(50000 + rand.Intn(100000))}
    62  		totalCapacity += nodes[i].capacity
    63  	}
    64  	m := NewClientManager(PieceWiseLinear{{0, totalCapacity}}, clock)
    65  	for _, n := range nodes {
    66  		n.bufLimit = n.capacity * 6000
    67  		n.node = NewClientNode(m, ServerParams{BufLimit: n.bufLimit, MinRecharge: n.capacity})
    68  	}
    69  	maxNodes := make([]int, maxCapacityNodes)
    70  	for i := range maxNodes {
    71  		// we don't care if some indexes are selected multiple times
    72  		// in that case we have fewer max nodes
    73  		maxNodes[i] = rand.Intn(nodeCount)
    74  	}
    75  
    76  	var sendCount int
    77  	for i := 0; i < testLength; i++ {
    78  		now := clock.Now()
    79  		for _, idx := range maxNodes {
    80  			for nodes[idx].send(t, now) {
    81  			}
    82  		}
    83  		if rand.Intn(testLength) < maxCapacityNodes*3 {
    84  			maxNodes[rand.Intn(maxCapacityNodes)] = rand.Intn(nodeCount)
    85  		}
    86  
    87  		sendCount += randomSend
    88  		failCount := randomSend * 10
    89  		for sendCount > 0 && failCount > 0 {
    90  			if nodes[rand.Intn(nodeCount)].send(t, now) {
    91  				sendCount--
    92  			} else {
    93  				failCount--
    94  			}
    95  		}
    96  		clock.Run(time.Millisecond)
    97  	}
    98  
    99  	var totalCost uint64
   100  	for _, n := range nodes {
   101  		totalCost += n.totalCost
   102  	}
   103  	ratio := float64(totalCost) / float64(totalCapacity) / testLength
   104  	if ratio < 0.98 || ratio > 1.02 {
   105  		t.Errorf("totalCost/totalCapacity/testLength ratio incorrect (expected: 1, got: %f)", ratio)
   106  	}
   107  }
   108  
   109  func (n *testNode) send(t *testing.T, now mclock.AbsTime) bool {
   110  	if now < n.waitUntil {
   111  		return false
   112  	}
   113  	n.index++
   114  	if ok, _, _ := n.node.AcceptRequest(0, n.index, testMaxCost); !ok {
   115  		t.Fatalf("Rejected request after expected waiting time has passed")
   116  	}
   117  	rcost := uint64(rand.Int63n(testMaxCost))
   118  	bv := n.node.RequestProcessed(0, n.index, testMaxCost, rcost)
   119  	if bv < testMaxCost {
   120  		n.waitUntil = now + mclock.AbsTime((testMaxCost-bv)*1001000/n.capacity)
   121  	}
   122  	n.totalCost += rcost
   123  	return true
   124  }