github.com/adoriasoft/tendermint@v0.34.0-dev1.0.20200722151356-96d84601a75a/rpc/jsonrpc/client/integration_test.go

// +build release

// The code in here is comprehensive as an integration
// test and is long, hence it is only run before releases.

package client

import (
	"bytes"
	"errors"
	"net"
	"regexp"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/libs/log"
)

func TestWSClientReconnectWithJitter(t *testing.T) {
	n := 8
	maxReconnectAttempts := 3
	// Max wait time for maxReconnectAttempts = 3 is
	// ceil(1+0.999) + ceil(2+0.999) + ceil(4+0.999) = 2 + 3 + 5 = 10s.
	maxSleepTime := time.Second * time.Duration(((1<<uint(maxReconnectAttempts))-1)+maxReconnectAttempts)
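	// The bound above assumes each reconnect attempt i (0-based) sleeps for
	// (1 << i) seconds plus strictly less than one second of random jitter,
	// so the total sleep is at most (2^maxReconnectAttempts - 1) + maxReconnectAttempts seconds.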

	var errNotConnected = errors.New("not connected")
	clientMap := make(map[int]*WSClient)
	buf := new(bytes.Buffer)
	logger := log.NewTMLogger(buf)
	for i := 0; i < n; i++ {
		c, err := NewWS("tcp://foo", "/websocket")
		require.Nil(t, err)
		c.Dialer = func(string, string) (net.Conn, error) {
			return nil, errNotConnected
		}
		c.SetLogger(logger)
		c.maxReconnectAttempts = maxReconnectAttempts
		// Not invoking defer c.Stop() because
		// after all the reconnect attempts have been
		// exhausted, c.Stop is implicitly invoked.
		clientMap[i] = c
		// Trigger the reconnect routine that performs exponential backoff.
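		// A sketch of the per-attempt backoff this test assumes the client
		// performs (illustrative names, not the client's actual code):
		//
		//	jitter  := time.Duration(rand.Float64() * float64(time.Second)) // sub-second jitter
		//	backoff := jitter + time.Duration(1<<uint(attempt))*time.Second
		//
		// The random jitter is what should make every logged backoff_duration
		// value distinct, which the log scan below relies on.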
		go c.reconnect()
	}

	stopCount := 0
	time.Sleep(maxSleepTime)
	for key, c := range clientMap {
		if !c.IsActive() {
			delete(clientMap, key)
			stopCount++
		}
	}
	require.Equal(t, n, stopCount, "expecting all clients to have been stopped")

	// Next, examine the logs to ensure that no backoff duration was repeated.
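	// Each reconnect attempt is expected to emit a log line containing a
	// backoff_duration=<duration> pair, which the regexp below extracts. Two
	// identical durations across attempts would suggest the jitter is missing.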
	backoffDurRegexp := regexp.MustCompile(`backoff_duration=(.+)`)
	matches := backoffDurRegexp.FindAll(buf.Bytes(), -1)
	seenMap := make(map[string]int)
	for i, match := range matches {
		if origIndex, seen := seenMap[string(match)]; seen {
			t.Errorf("match #%d (%q) was seen originally at log entry #%d", i, match, origIndex)
		} else {
			seenMap[string(match)] = i
		}
	}
}