github.com/aigarnetwork/aigar@v0.0.0-20191115204914-d59a6eb70f8e/les/flowcontrol/manager_test.go (about)

     1  //  Copyright 2018 The go-ethereum Authors
     2  //  Copyright 2019 The go-aigar Authors
     3  //  This file is part of the go-aigar library.
     4  //
     5  //  The go-aigar library is free software: you can redistribute it and/or modify
     6  //  it under the terms of the GNU Lesser General Public License as published by
     7  //  the Free Software Foundation, either version 3 of the License, or
     8  //  (at your option) any later version.
     9  //
    10  //  The go-aigar library is distributed in the hope that it will be useful,
    11  //  but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  //  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  //  GNU Lesser General Public License for more details.
    14  //
    15  //  You should have received a copy of the GNU Lesser General Public License
    16  //  along with the go-aigar library. If not, see <http://www.gnu.org/licenses/>.
    17  
    18  package flowcontrol
    19  
    20  import (
    21  	"math/rand"
    22  	"testing"
    23  	"time"
    24  
    25  	"github.com/AigarNetwork/aigar/common/mclock"
    26  )
    27  
// testNode represents a single simulated request sender in the flow control
// tests. It tracks the client-side view of the node's flow control state and
// the statistics needed to verify the served amount at the end of a test.
type testNode struct {
	node               *ClientNode    // server-side flow control state registered with the ClientManager
	bufLimit, capacity uint64         // buffer limit and guaranteed recharge rate (the node's ServerParams)
	waitUntil          mclock.AbsTime // earliest simulated time at which the next request may be sent
	index, totalCost   uint64         // monotonically increasing request id and sum of realized request costs
}
    34  
const (
	// testMaxCost is the announced (maximum) cost of a single simulated request.
	testMaxCost = 1000000
	// testLength is the number of main loop iterations per test run; the
	// simulated clock advances one millisecond per iteration.
	testLength  = 100000
)
    39  
    40  // testConstantTotalCapacity simulates multiple request sender nodes and verifies
    41  // whether the total amount of served requests matches the expected value based on
    42  // the total capacity and the duration of the test.
    43  // Some nodes are sending requests occasionally so that their buffer should regularly
    44  // reach the maximum while other nodes (the "max capacity nodes") are sending at the
    45  // maximum permitted rate. The max capacity nodes are changed multiple times during
    46  // a single test.
    47  func TestConstantTotalCapacity(t *testing.T) {
    48  	testConstantTotalCapacity(t, 10, 1, 0)
    49  	testConstantTotalCapacity(t, 10, 1, 1)
    50  	testConstantTotalCapacity(t, 30, 1, 0)
    51  	testConstantTotalCapacity(t, 30, 2, 3)
    52  	testConstantTotalCapacity(t, 100, 1, 0)
    53  	testConstantTotalCapacity(t, 100, 3, 5)
    54  	testConstantTotalCapacity(t, 100, 5, 10)
    55  }
    56  
// testConstantTotalCapacity runs one simulation with the given number of nodes,
// the given number of "max capacity" senders and the given rate of extra random
// sends per millisecond, then checks that the total served cost matches the
// configured total capacity within a 2% tolerance.
//
// NOTE(review): the exact sequence of rand calls is part of the simulation's
// determinism for a given seed; do not reorder the rand.Intn / send calls.
func testConstantTotalCapacity(t *testing.T, nodeCount, maxCapacityNodes, randomSend int) {
	clock := &mclock.Simulated{}
	nodes := make([]*testNode, nodeCount)
	var totalCapacity uint64
	// Assign each node a random guaranteed capacity in [50000, 150000).
	for i := range nodes {
		nodes[i] = &testNode{capacity: uint64(50000 + rand.Intn(100000))}
		totalCapacity += nodes[i].capacity
	}
	// The manager's total recharge curve is a constant equal to the sum of
	// all node capacities, so the system should serve exactly totalCapacity
	// cost units per simulated second in aggregate.
	m := NewClientManager(PieceWiseLinear{{0, totalCapacity}}, clock)
	for _, n := range nodes {
		// Buffer limit is 6 seconds' worth of recharge at the node's rate.
		n.bufLimit = n.capacity * 6000
		n.node = NewClientNode(m, ServerParams{BufLimit: n.bufLimit, MinRecharge: n.capacity})
	}
	maxNodes := make([]int, maxCapacityNodes)
	for i := range maxNodes {
		// we don't care if some indexes are selected multiple times
		// in that case we have fewer max nodes
		maxNodes[i] = rand.Intn(nodeCount)
	}

	var sendCount int
	for i := 0; i < testLength; i++ {
		now := clock.Now()
		// Max capacity nodes send as many requests as they are allowed to
		// in this millisecond (send returns false once the node must wait).
		for _, idx := range maxNodes {
			for nodes[idx].send(t, now) {
			}
		}
		// Occasionally replace one of the max capacity nodes with a random
		// node; on average each slot is rotated ~3 times per test run.
		if rand.Intn(testLength) < maxCapacityNodes*3 {
			maxNodes[rand.Intn(maxCapacityNodes)] = rand.Intn(nodeCount)
		}

		// Issue randomSend extra requests from random nodes. failCount
		// bounds the number of rejected attempts so that the loop cannot
		// spin forever when most nodes are in their waiting period.
		sendCount += randomSend
		failCount := randomSend * 10
		for sendCount > 0 && failCount > 0 {
			if nodes[rand.Intn(nodeCount)].send(t, now) {
				sendCount--
			} else {
				failCount--
			}
		}
		clock.Run(time.Millisecond)
	}

	// The aggregate served cost divided by (total capacity * elapsed time in
	// milliseconds) should be 1; accept a 2% deviation.
	var totalCost uint64
	for _, n := range nodes {
		totalCost += n.totalCost
	}
	ratio := float64(totalCost) / float64(totalCapacity) / testLength
	if ratio < 0.98 || ratio > 1.02 {
		t.Errorf("totalCost/totalCapacity/testLength ratio incorrect (expected: 1, got: %f)", ratio)
	}

}
   110  
   111  func (n *testNode) send(t *testing.T, now mclock.AbsTime) bool {
   112  	if now < n.waitUntil {
   113  		return false
   114  	}
   115  	n.index++
   116  	if ok, _, _ := n.node.AcceptRequest(0, n.index, testMaxCost); !ok {
   117  		t.Fatalf("Rejected request after expected waiting time has passed")
   118  	}
   119  	rcost := uint64(rand.Int63n(testMaxCost))
   120  	bv := n.node.RequestProcessed(0, n.index, testMaxCost, rcost)
   121  	if bv < testMaxCost {
   122  		n.waitUntil = now + mclock.AbsTime((testMaxCost-bv)*1001000/n.capacity)
   123  	}
   124  	n.totalCost += rcost
   125  	return true
   126  }