github.com/Oyster-zx/tendermint@v0.34.24-fork/rpc/jsonrpc/client/integration_test.go

//go:build release
// +build release

// The code here is a comprehensive, long-running integration
// test, hence it is only run before releases.

package client

import (
	"bytes"
	"errors"
	"net"
	"regexp"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/libs/log"
)

func TestWSClientReconnectWithJitter(t *testing.T) {
	n := 8
	maxReconnectAttempts := 3
	// With maxReconnectAttempts = 3, the max wait time is
	// ceil(1+0.999) + ceil(2+0.999) + ceil(4+0.999) = 2 + 3 + 5 = 10s,
	// i.e. ((1<<maxReconnectAttempts)-1) + maxReconnectAttempts seconds.
	maxSleepTime := time.Second * time.Duration(((1<<uint(maxReconnectAttempts))-1)+maxReconnectAttempts)

	var errNotConnected = errors.New("not connected")
	clientMap := make(map[int]*WSClient)
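	// All clients log to a single in-memory buffer so the backoff durations can be inspected afterwards.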
	buf := new(bytes.Buffer)
	logger := log.NewTMLogger(buf)
	for i := 0; i < n; i++ {
		c, err := NewWS("tcp://foo", "/websocket")
		require.NoError(t, err)
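		// Override the dialer so every connection attempt fails,
		// forcing the reconnect loop to retry until its attempts are exhausted.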
		c.Dialer = func(string, string) (net.Conn, error) {
			return nil, errNotConnected
		}
		c.SetLogger(logger)
		c.maxReconnectAttempts = maxReconnectAttempts
		// Not invoking defer c.Stop() because
		// after all the reconnect attempts have been
		// exhausted, c.Stop is implicitly invoked.
		clientMap[i] = c
		// Trigger the reconnect routine that performs exponential backoff.
		go c.reconnect()
	}

	stopCount := 0
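	// Wait long enough for every client to exhaust its reconnect attempts and stop itself.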
	time.Sleep(maxSleepTime)
	for key, c := range clientMap {
		if !c.IsActive() {
			delete(clientMap, key)
			stopCount++
		}
	}
	require.Equal(t, n, stopCount, "expecting all clients to have been stopped")

	// Next, examine the logs to ensure that no single backoff duration was repeated.
	backoffDurRegexp := regexp.MustCompile(`backoff_duration=(.+)`)
	matches := backoffDurRegexp.FindAll(buf.Bytes(), -1)
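	// Record the first match index for each duration; a duplicate means
	// two attempts used an identical, i.e. unjittered, backoff.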
	seenMap := make(map[string]int)
	for i, match := range matches {
		if origIndex, seen := seenMap[string(match)]; seen {
			t.Errorf("match #%d (%q) was seen originally at log entry #%d", i, match, origIndex)
		} else {
			seenMap[string(match)] = i
		}
	}
}