github.com/yankunsam/loki/v2@v2.6.3-0.20220817130409-389df5235c27/pkg/querier/tail_test.go

package querier

import (
	"errors"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/grafana/loki/pkg/iter"
	loghttp "github.com/grafana/loki/pkg/loghttp/legacy"
	"github.com/grafana/loki/pkg/logproto"
)

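// Shared test timings: timeout bounds how long the helpers (and the Tailer
// under test) may wait, while throttle paces their polling loops.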
const (
	timeout  = 1 * time.Second
	throttle = 10 * time.Millisecond
)

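// TestTailer exercises the Tailer against combinations of historic entries
// and mocked tail clients, including its per-response entry limit, response
// buffering limit, and dropped-entries accounting.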
func TestTailer(t *testing.T) {
	t.Parallel()

	tests := map[string]struct {
		historicEntries iter.EntryIterator
		tailClient      *tailClientMock
		tester          func(t *testing.T, tailer *Tailer, tailClient *tailClientMock)
	}{
		"tail logs from historic entries only (no tail clients provided)": {
			historicEntries: mockStreamIterator(1, 2),
			tailClient:      nil,
			tester: func(t *testing.T, tailer *Tailer, tailClient *tailClientMock) {
				responses, err := readFromTailer(tailer, 2)
				require.NoError(t, err)

				actual := flattenStreamsFromResponses(responses)

				assert.Equal(t, []logproto.Stream{
					mockStream(1, 1),
					mockStream(2, 1),
				}, actual)
			},
		},
		"tail logs from tail clients only (no historic entries provided)": {
			historicEntries: mockStreamIterator(0, 0),
			tailClient:      newTailClientMock().mockRecvWithTrigger(mockTailResponse(mockStream(1, 1))),
			tester: func(t *testing.T, tailer *Tailer, tailClient *tailClientMock) {
				tailClient.triggerRecv()

				responses, err := readFromTailer(tailer, 1)
				require.NoError(t, err)

				actual := flattenStreamsFromResponses(responses)

				assert.Equal(t, []logproto.Stream{
					mockStream(1, 1),
				}, actual)
			},
		},
		"tail logs both from historic entries and tail clients": {
			historicEntries: mockStreamIterator(1, 2),
			tailClient:      newTailClientMock().mockRecvWithTrigger(mockTailResponse(mockStream(3, 1))),
			tester: func(t *testing.T, tailer *Tailer, tailClient *tailClientMock) {
				tailClient.triggerRecv()

				responses, err := readFromTailer(tailer, 3)
				require.NoError(t, err)

				actual := flattenStreamsFromResponses(responses)

				assert.Equal(t, []logproto.Stream{
					mockStream(1, 1),
					mockStream(2, 1),
					mockStream(3, 1),
				}, actual)
			},
		},
		"honor max entries per tail response": {
			historicEntries: mockStreamIterator(1, maxEntriesPerTailResponse+1),
			tailClient:      nil,
			tester: func(t *testing.T, tailer *Tailer, tailClient *tailClientMock) {
				responses, err := readFromTailer(tailer, maxEntriesPerTailResponse+1)
				require.NoError(t, err)

				require.Equal(t, 2, len(responses))
				assert.Equal(t, maxEntriesPerTailResponse, countEntriesInStreams(responses[0].Streams))
				assert.Equal(t, 1, countEntriesInStreams(responses[1].Streams))
				assert.Equal(t, 0, len(responses[1].DroppedEntries))
			},
		},
		"honor max buffered tail responses": {
			historicEntries: mockStreamIterator(1, (maxEntriesPerTailResponse*maxBufferedTailResponses)+5),
			tailClient:      newTailClientMock().mockRecvWithTrigger(mockTailResponse(mockStream(1, 1))),
			tester: func(t *testing.T, tailer *Tailer, tailClient *tailClientMock) {
				err := waitUntilTailerOpenStreamsHaveBeenConsumed(tailer)
				require.NoError(t, err)

				// Since the response channel is full/blocked, we expect every
				// buffered response to be "full" and the extra historic
				// entries to have been dropped
				responses, err := readFromTailer(tailer, (maxEntriesPerTailResponse * maxBufferedTailResponses))
				require.NoError(t, err)

				require.Equal(t, maxBufferedTailResponses, len(responses))
				for i := 0; i < maxBufferedTailResponses; i++ {
					assert.Equal(t, maxEntriesPerTailResponse, countEntriesInStreams(responses[i].Streams))
					assert.Equal(t, 0, len(responses[i].DroppedEntries))
				}

				// Since we won't receive dropped entries until the next tail
				// response, trigger a Recv() from the tail client
				tailClient.triggerRecv()

				responses, err = readFromTailer(tailer, 1)
				require.NoError(t, err)

				require.Equal(t, 1, len(responses))
				assert.Equal(t, 1, countEntriesInStreams(responses[0].Streams))
				assert.Equal(t, 5, len(responses[0].DroppedEntries))
			},
		},
		"honor max dropped entries per tail response": {
			historicEntries: mockStreamIterator(1, (maxEntriesPerTailResponse*maxBufferedTailResponses)+maxDroppedEntriesPerTailResponse+5),
			tailClient:      newTailClientMock().mockRecvWithTrigger(mockTailResponse(mockStream(1, 1))),
			tester: func(t *testing.T, tailer *Tailer, tailClient *tailClientMock) {
				err := waitUntilTailerOpenStreamsHaveBeenConsumed(tailer)
				require.NoError(t, err)

				// Since the response channel is full/blocked, we expect every
				// buffered response to be "full" and the extra historic
				// entries to have been dropped
				responses, err := readFromTailer(tailer, (maxEntriesPerTailResponse * maxBufferedTailResponses))
				require.NoError(t, err)

				require.Equal(t, maxBufferedTailResponses, len(responses))
				for i := 0; i < maxBufferedTailResponses; i++ {
					assert.Equal(t, maxEntriesPerTailResponse, countEntriesInStreams(responses[i].Streams))
					assert.Equal(t, 0, len(responses[i].DroppedEntries))
				}

				// Since we won't receive dropped entries until the next tail
				// response, trigger a Recv() from the tail client
				tailClient.triggerRecv()

				responses, err = readFromTailer(tailer, 1)
				require.NoError(t, err)

				require.Equal(t, 1, len(responses))
				assert.Equal(t, 1, countEntriesInStreams(responses[0].Streams))
				assert.Equal(t, maxDroppedEntriesPerTailResponse, len(responses[0].DroppedEntries))
			},
		},
	}

	for testName, test := range tests {
		t.Run(testName, func(t *testing.T) {
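			// Stub out reconnection: disconnected ingesters never come back
			// in these tests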
			tailDisconnectedIngesters := func([]string) (map[string]logproto.Querier_TailClient, error) {
				return map[string]logproto.Querier_TailClient{}, nil
			}

			tailClients := map[string]logproto.Querier_TailClient{}
			if test.tailClient != nil {
				tailClients["test"] = test.tailClient
			}
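			// Construct the Tailer under test and close it once the subtest
			// ends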
			tailer := newTailer(0, tailClients, test.historicEntries, tailDisconnectedIngesters, timeout, throttle, NewMetrics(nil))
			defer tailer.close()

			test.tester(t, tailer, test.tailClient)
		})
	}
}

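// readFromTailer reads responses from the Tailer until maxEntries entries
// have been received, the Tailer is stopped, or the test timeout expires.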
func readFromTailer(tailer *Tailer, maxEntries int) ([]*loghttp.TailResponse, error) {
	responses := make([]*loghttp.TailResponse, 0)
	entriesCount := 0

	// Ensure we do not wait indefinitely
	timeoutTicker := time.NewTicker(timeout)
	defer timeoutTicker.Stop()

	for !tailer.stopped && entriesCount < maxEntries {
		select {
		case <-timeoutTicker.C:
			return nil, errors.New("timeout expired while reading responses from Tailer")
		case response := <-tailer.getResponseChan():
			responses = append(responses, response)
			entriesCount += countEntriesInStreams(response.Streams)
		default:
			time.Sleep(throttle)
		}
	}

	return responses, nil
}

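// waitUntilTailerOpenStreamsHaveBeenConsumed polls the Tailer until all open
// streams have been consumed, or returns an error once the test timeout
// expires.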
func waitUntilTailerOpenStreamsHaveBeenConsumed(tailer *Tailer) error {
	// Ensure we do not wait indefinitely
	timeoutTicker := time.NewTicker(timeout)
	defer timeoutTicker.Stop()

	for {
		if isTailerOpenStreamsConsumed(tailer) {
			return nil
		}

		select {
		case <-timeoutTicker.C:
			return errors.New("timeout expired while waiting for the Tailer to consume all open streams")
		default:
			time.Sleep(throttle)
		}
	}
}

// isTailerOpenStreamsConsumed returns whether the input Tailer has fully
// consumed all streams from the openStreamIterator, which means the
// Tailer.loop() is now throttling.
func isTailerOpenStreamsConsumed(tailer *Tailer) bool {
	tailer.streamMtx.Lock()
	defer tailer.streamMtx.Unlock()

	return tailer.openStreamIterator.Len() == 0 || tailer.openStreamIterator.Peek() == time.Unix(0, 0)
}

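// countEntriesInStreams returns the total number of entries across all input
// streams.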
func countEntriesInStreams(streams []logproto.Stream) int {
	count := 0

	for _, stream := range streams {
		count += len(stream.Entries)
	}

	return count
}

// flattenStreamsFromResponses returns an array of streams, each containing
// one and only one entry from the input list of responses. This function is
// used to abstract away implementation details of the Tailer when testing the
// output, regardless of how the responses have been generated (i.e. multiple
// entries grouped into the same stream).
func flattenStreamsFromResponses(responses []*loghttp.TailResponse) []logproto.Stream {
	result := make([]logproto.Stream, 0)

	for _, response := range responses {
		for _, stream := range response.Streams {
			for _, entry := range stream.Entries {
				result = append(result, logproto.Stream{
					Entries: []logproto.Entry{entry},
					Labels:  stream.Labels,
				})
			}
		}
	}

	return result
}
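
// TestFlattenHelpersSketch is a minimal, illustrative self-check of the two
// helpers above, assuming this package's mockStream helper (a stream of n
// entries starting at the given timestamp). It is a sketch added for clarity,
// not part of the original suite.
func TestFlattenHelpersSketch(t *testing.T) {
	t.Parallel()

	// One response carrying a single stream with two entries...
	responses := []*loghttp.TailResponse{
		{Streams: []logproto.Stream{mockStream(1, 2)}},
	}

	// ...counts as two entries in total...
	assert.Equal(t, 2, countEntriesInStreams(responses[0].Streams))

	// ...and flattens into two single-entry streams sharing the stream labels.
	flattened := flattenStreamsFromResponses(responses)
	require.Equal(t, 2, len(flattened))
	for _, stream := range flattened {
		assert.Equal(t, 1, len(stream.Entries))
	}
}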