github.com/ydb-platform/ydb-go-sdk/v3@v3.57.0/internal/topic/topicreaderinternal/stream_reader_impl_test.go (about)

     1  package topicreaderinternal
     2  
     3  import (
     4  	"bytes"
     5  	"compress/gzip"
     6  	"context"
     7  	"errors"
     8  	"io"
     9  	"testing"
    10  	"time"
    11  
    12  	"github.com/stretchr/testify/require"
    13  	"go.uber.org/mock/gomock"
    14  
    15  	"github.com/ydb-platform/ydb-go-sdk/v3/internal/empty"
    16  	"github.com/ydb-platform/ydb-go-sdk/v3/internal/grpcwrapper/rawtopic/rawtopiccommon"
    17  	"github.com/ydb-platform/ydb-go-sdk/v3/internal/grpcwrapper/rawtopic/rawtopicreader"
    18  	"github.com/ydb-platform/ydb-go-sdk/v3/internal/grpcwrapper/rawydb"
    19  	"github.com/ydb-platform/ydb-go-sdk/v3/internal/xcontext"
    20  	"github.com/ydb-platform/ydb-go-sdk/v3/internal/xerrors"
    21  	"github.com/ydb-platform/ydb-go-sdk/v3/internal/xsync"
    22  	"github.com/ydb-platform/ydb-go-sdk/v3/internal/xtest"
    23  	"github.com/ydb-platform/ydb-go-sdk/v3/trace"
    24  )
    25  
// TestTopicStreamReaderImpl_BufferCounterOnStopPartition checks that the bytes
// accounted in reader.restBufferSizeBytes for messages of a stopped partition
// are returned to the buffer, for both graceful and forced partition stop.
func TestTopicStreamReaderImpl_BufferCounterOnStopPartition(t *testing.T) {
	table := []struct {
		name     string
		graceful bool
	}{
		{
			name:     "graceful",
			graceful: true,
		},
		{
			name:     "force",
			graceful: false,
		},
	}

	for _, test := range table {
		t.Run(test.name, func(t *testing.T) {
			e := newTopicReaderTestEnv(t)
			e.Start()

			// Occupy almost the whole buffer with a single server message,
			// leaving one free byte so the change is observable.
			initialBufferSize := e.reader.restBufferSizeBytes.Load()
			messageSize := initialBufferSize - 1

			// The reader may (but is not required to) request more data once
			// the buffer space is released.
			e.stream.EXPECT().Send(&rawtopicreader.ReadRequest{BytesSize: int(messageSize)}).MaxTimes(1)

			messageReaded := make(empty.Chan)
			e.SendFromServerAndSetNextCallback(&rawtopicreader.ReadResponse{
				BytesSize: int(messageSize),
				PartitionData: []rawtopicreader.PartitionData{
					{
						PartitionSessionID: e.partitionSessionID,
						Batches: []rawtopicreader.Batch{
							{
								Codec:            0,
								ProducerID:       "",
								WriteSessionMeta: nil,
								WrittenAt:        time.Time{},
								MessageData: []rawtopicreader.MessageData{
									{
										Offset: 1,
										SeqNo:  1,
									},
								},
							},
						},
					},
				},
			}, func() {
				close(messageReaded)
			})
			<-messageReaded
			// All but one byte of the buffer is now held by the unread message.
			require.Equal(t, int64(1), e.reader.restBufferSizeBytes.Load())

			partitionStopped := make(empty.Chan)
			e.SendFromServerAndSetNextCallback(&rawtopicreader.StopPartitionSessionRequest{
				ServerMessageMetadata: rawtopiccommon.ServerMessageMetadata{},
				PartitionSessionID:    e.partitionSessionID,
				Graceful:              test.graceful,
				CommittedOffset:       0,
			}, func() {
				close(partitionStopped)
			})
			<-partitionStopped

			// Cancel the read as soon as the buffer counter returns to its
			// initial value; ReadMessageBatch below unblocks on cancellation.
			fixedBufferSizeCtx, cancel := context.WithCancel(e.ctx)
			go func() {
				xtest.SpinWaitCondition(t, nil, func() bool {
					return initialBufferSize == e.reader.restBufferSizeBytes.Load()
				})
				cancel()
			}()

			_, _ = e.reader.ReadMessageBatch(fixedBufferSizeCtx, newReadMessageBatchOptions())
			<-fixedBufferSizeCtx.Done()
			// Stopping the partition must have released the held bytes.
			require.Equal(t, initialBufferSize, e.reader.restBufferSizeBytes.Load())
		})
	}
}
   104  
// TestTopicStreamReaderImpl_CommitStolen covers commit behavior when the server
// delivers messages with offset gaps ("stolen" offsets): commits must cover the
// whole received range, out-of-order commits must fail in sync mode, and a
// commit must still be delivered after a graceful partition stop.
func TestTopicStreamReaderImpl_CommitStolen(t *testing.T) {
	xtest.TestManyTimesWithName(t, "SimpleCommit", func(t testing.TB) {
		e := newTopicReaderTestEnv(t)
		e.Start()

		lastOffset := e.partitionSession.lastReceivedMessageOffset()
		const dataSize = 4

		// request new data portion
		readRequestReceived := make(empty.Chan)
		e.stream.EXPECT().Send(&rawtopicreader.ReadRequest{BytesSize: dataSize * 2}).Do(func(_ interface{}) {
			close(readRequestReceived)
		})

		commitReceived := make(empty.Chan)
		// Expect a single commit request spanning the full range, including
		// the offset gaps (stolen offsets) between the received messages.
		e.stream.EXPECT().Send(
			&rawtopicreader.CommitOffsetRequest{
				CommitOffsets: []rawtopicreader.PartitionCommitOffset{
					{
						PartitionSessionID: e.partitionSessionID,
						Offsets: []rawtopicreader.OffsetRange{
							{
								Start: lastOffset + 1,
								End:   lastOffset + 16,
							},
						},
					},
				},
			},
		).Do(func(req *rawtopicreader.CommitOffsetRequest) {
			close(commitReceived)
		})

		// Send two messages whose offsets have gaps (stolen offsets)
		// relative to the last received offset.
		e.SendFromServer(&rawtopicreader.ReadResponse{
			BytesSize: dataSize,
			PartitionData: []rawtopicreader.PartitionData{
				{
					PartitionSessionID: e.partitionSessionID,
					Batches: []rawtopicreader.Batch{
						{
							Codec:      rawtopiccommon.CodecRaw,
							ProducerID: "1",
							MessageData: []rawtopicreader.MessageData{
								{
									Offset: lastOffset + 10,
								},
							},
						},
					},
				},
			},
		})

		e.SendFromServer(&rawtopicreader.ReadResponse{
			BytesSize: dataSize,
			PartitionData: []rawtopicreader.PartitionData{
				{
					PartitionSessionID: e.partitionSessionID,
					Batches: []rawtopicreader.Batch{
						{
							Codec:      rawtopiccommon.CodecRaw,
							ProducerID: "1",
							MessageData: []rawtopicreader.MessageData{
								{
									Offset: lastOffset + 15,
								},
							},
						},
					},
				},
			},
		})

		opts := newReadMessageBatchOptions()
		opts.MinCount = 2
		batch, err := e.reader.ReadMessageBatch(e.ctx, opts)
		require.NoError(t, err)
		require.NoError(t, e.reader.Commit(e.ctx, batch.getCommitRange().priv))
		xtest.WaitChannelClosed(t, commitReceived)
		xtest.WaitChannelClosed(t, readRequestReceived)
	})
	xtest.TestManyTimesWithName(t, "WrongOrderCommitWithSyncMode", func(t testing.TB) {
		e := newTopicReaderTestEnv(t)
		e.reader.cfg.CommitMode = CommitModeSync
		e.Start()

		lastOffset := e.partitionSession.lastReceivedMessageOffset()
		const dataSize = 4
		// request new data portion
		readRequestReceived := make(empty.Chan)
		e.stream.EXPECT().Send(&rawtopicreader.ReadRequest{BytesSize: dataSize * 2}).Do(func(_ interface{}) {
			close(readRequestReceived)
		})

		e.SendFromServer(&rawtopicreader.ReadResponse{
			BytesSize: dataSize,
			PartitionData: []rawtopicreader.PartitionData{
				{
					PartitionSessionID: e.partitionSessionID,
					Batches: []rawtopicreader.Batch{
						{
							Codec:      rawtopiccommon.CodecRaw,
							ProducerID: "1",
							MessageData: []rawtopicreader.MessageData{
								{
									Offset: lastOffset + 1,
								},
							},
						},
					},
				},
			},
		})

		e.SendFromServer(&rawtopicreader.ReadResponse{
			BytesSize: dataSize,
			PartitionData: []rawtopicreader.PartitionData{
				{
					PartitionSessionID: e.partitionSessionID,
					Batches: []rawtopicreader.Batch{
						{
							Codec:      rawtopiccommon.CodecRaw,
							ProducerID: "1",
							MessageData: []rawtopicreader.MessageData{
								{
									Offset: lastOffset + 2,
								},
							},
						},
					},
				},
			},
		})

		opts := newReadMessageBatchOptions()
		opts.MinCount = 2
		batch, err := e.reader.ReadMessageBatch(e.ctx, opts)
		require.NoError(t, err)
		// Committing the second message before the first violates the commit
		// order required by sync mode.
		require.ErrorIs(t, e.reader.Commit(e.ctx, batch.Messages[1].getCommitRange().priv), ErrWrongCommitOrderInSyncMode)
		xtest.WaitChannelClosed(t, readRequestReceived)
	})

	xtest.TestManyTimesWithName(t, "CommitAfterGracefulStopPartition", func(t testing.TB) {
		e := newTopicReaderTestEnv(t)

		committed := e.partitionSession.committedOffset()
		commitReceived := make(empty.Chan)
		e.stream.EXPECT().Send(&rawtopicreader.CommitOffsetRequest{CommitOffsets: []rawtopicreader.PartitionCommitOffset{
			{
				PartitionSessionID: e.partitionSessionID,
				Offsets: []rawtopicreader.OffsetRange{
					{
						Start: committed,
						End:   committed + 1,
					},
				},
			},
		}}).Do(func(_ interface{}) {
			close(commitReceived)
		}).Return(nil)

		stopPartitionResponseSent := make(empty.Chan)
		e.stream.EXPECT().Send(&rawtopicreader.StopPartitionSessionResponse{PartitionSessionID: e.partitionSessionID}).
			Do(func(_ interface{}) {
				close(stopPartitionResponseSent)
			}).Return(nil)

		e.Start()

		// send from server message, then partition graceful stop request
		go func() {
			e.SendFromServer(&rawtopicreader.ReadResponse{
				PartitionData: []rawtopicreader.PartitionData{
					{
						PartitionSessionID: e.partitionSessionID,
						Batches: []rawtopicreader.Batch{
							{
								Codec: rawtopiccommon.CodecRaw,
								MessageData: []rawtopicreader.MessageData{
									{
										Offset: committed,
										SeqNo:  1,
									},
								},
							},
						},
					},
				},
			})
			e.SendFromServer(&rawtopicreader.StopPartitionSessionRequest{
				PartitionSessionID: e.partitionSessionID,
				Graceful:           true,
			})
		}()

		// Cancel the second read once the reader has answered the graceful
		// stop request.
		readCtx, readCtxCancel := xcontext.WithCancel(e.ctx)
		go func() {
			<-stopPartitionResponseSent
			readCtxCancel()
		}()

		batch, err := e.reader.ReadMessageBatch(readCtx, newReadMessageBatchOptions())
		require.NoError(t, err)
		// The commit must still be sent even though the partition is stopping.
		err = e.reader.Commit(e.ctx, batch.commitRange)
		require.NoError(t, err)
		_, err = e.reader.ReadMessageBatch(readCtx, newReadMessageBatchOptions())
		require.ErrorIs(t, err, context.Canceled)

		select {
		case <-e.partitionSession.Context().Done():
			// pass
		case <-time.After(time.Second):
			t.Fatal("partition session not closed")
		}

		xtest.WaitChannelClosed(t, commitReceived)
	})
}
   326  
   327  func TestTopicStreamReaderImpl_Create(t *testing.T) {
   328  	xtest.TestManyTimesWithName(t, "BadSessionInitialization", func(t testing.TB) {
   329  		mc := gomock.NewController(t)
   330  		stream := NewMockRawTopicReaderStream(mc)
   331  		stream.EXPECT().Send(gomock.Any()).Return(nil)
   332  		stream.EXPECT().Recv().Return(&rawtopicreader.StartPartitionSessionRequest{
   333  			ServerMessageMetadata: rawtopiccommon.ServerMessageMetadata{Status: rawydb.StatusInternalError},
   334  		}, nil)
   335  		stream.EXPECT().CloseSend().Return(nil)
   336  
   337  		reader, err := newTopicStreamReader(nextReaderID(), stream, newTopicStreamReaderConfig())
   338  		require.Error(t, err)
   339  		require.Nil(t, reader)
   340  	})
   341  }
   342  
   343  func TestTopicStreamReaderImpl_WaitInit(t *testing.T) {
   344  	t.Run("OK", func(t *testing.T) {
   345  		e := newTopicReaderTestEnv(t)
   346  		e.Start()
   347  		err := e.reader.WaitInit(context.Background())
   348  		require.NoError(t, err)
   349  	})
   350  
   351  	t.Run("not started", func(t *testing.T) {
   352  		e := newTopicReaderTestEnv(t)
   353  		err := e.reader.WaitInit(context.Background())
   354  		require.Error(t, err)
   355  	})
   356  }
   357  
// TestStreamReaderImpl_OnPartitionCloseHandle checks partition stop handling:
// the partition context cancellation on forced stop, and the
// OnReaderPartitionReadStopResponse trace hook payload for both graceful and
// forced stops.
func TestStreamReaderImpl_OnPartitionCloseHandle(t *testing.T) {
	xtest.TestManyTimesWithName(t, "GracefulFalseCancelPartitionContext", func(t testing.TB) {
		e := newTopicReaderTestEnv(t)
		e.Start()

		require.NoError(t, e.partitionSession.Context().Err())

		// stop partition; a forced (non-graceful) stop must cancel the
		// partition session context
		e.SendFromServerAndSetNextCallback(
			&rawtopicreader.StopPartitionSessionRequest{PartitionSessionID: e.partitionSessionID},
			func() {
				require.Error(t, e.partitionSession.Context().Err())
			})
		e.WaitMessageReceived()
	})
	xtest.TestManyTimesWithName(t, "TraceGracefulTrue", func(t testing.TB) {
		e := newTopicReaderTestEnv(t)

		readMessagesCtx, readMessagesCtxCancel := xcontext.WithCancel(context.Background())
		committedOffset := int64(222)

		e.reader.cfg.Trace.OnReaderPartitionReadStopResponse = func(info trace.TopicReaderPartitionReadStopResponseStartInfo) func(doneInfo trace.TopicReaderPartitionReadStopResponseDoneInfo) { //nolint:lll
			expected := trace.TopicReaderPartitionReadStopResponseStartInfo{
				ReaderConnectionID: e.reader.readConnectionID,
				PartitionContext:   e.partitionSession.ctx,
				Topic:              e.partitionSession.Topic,
				PartitionID:        e.partitionSession.PartitionID,
				PartitionSessionID: e.partitionSession.partitionSessionID.ToInt64(),
				CommittedOffset:    committedOffset,
				Graceful:           true,
			}
			require.Equal(t, expected, info)

			// On a graceful stop the partition context must still be alive
			// when the hook fires.
			require.NoError(t, info.PartitionContext.Err())

			// Unblock the ReadMessageBatch call below.
			readMessagesCtxCancel()

			return nil
		}

		e.Start()

		stopPartitionResponseSent := make(empty.Chan)
		e.stream.EXPECT().Send(&rawtopicreader.StopPartitionSessionResponse{
			PartitionSessionID: e.partitionSessionID,
		}).Return(nil).Do(func(_ interface{}) {
			close(stopPartitionResponseSent)
		})

		e.SendFromServer(&rawtopicreader.StopPartitionSessionRequest{
			PartitionSessionID: e.partitionSessionID,
			Graceful:           true,
			CommittedOffset:    rawtopicreader.NewOffset(committedOffset),
		})

		_, err := e.reader.ReadMessageBatch(readMessagesCtx, newReadMessageBatchOptions())
		require.Error(t, err)
		require.Error(t, readMessagesCtx.Err())
		xtest.WaitChannelClosed(t, stopPartitionResponseSent)
	})
	xtest.TestManyTimesWithName(t, "TraceGracefulFalse", func(t testing.TB) {
		e := newTopicReaderTestEnv(t)

		readMessagesCtx, readMessagesCtxCancel := xcontext.WithCancel(context.Background())
		committedOffset := int64(222)

		e.reader.cfg.Trace.OnReaderPartitionReadStopResponse = func(info trace.TopicReaderPartitionReadStopResponseStartInfo) func(doneInfo trace.TopicReaderPartitionReadStopResponseDoneInfo) { //nolint:lll
			expected := trace.TopicReaderPartitionReadStopResponseStartInfo{
				ReaderConnectionID: e.reader.readConnectionID,
				PartitionContext:   e.partitionSession.ctx,
				Topic:              e.partitionSession.Topic,
				PartitionID:        e.partitionSession.PartitionID,
				PartitionSessionID: e.partitionSession.partitionSessionID.ToInt64(),
				CommittedOffset:    committedOffset,
				Graceful:           false,
			}
			require.Equal(t, expected, info)
			// On a forced stop the partition context is already canceled by
			// the time the hook fires.
			require.Error(t, info.PartitionContext.Err())

			readMessagesCtxCancel()

			return nil
		}

		e.Start()

		e.SendFromServer(&rawtopicreader.StopPartitionSessionRequest{
			PartitionSessionID: e.partitionSessionID,
			Graceful:           false,
			CommittedOffset:    rawtopicreader.NewOffset(committedOffset),
		})

		_, err := e.reader.ReadMessageBatch(readMessagesCtx, newReadMessageBatchOptions())
		require.Error(t, err)
		require.Error(t, readMessagesCtx.Err())
	})
}
   455  
// TestTopicStreamReaderImpl_ReadMessages covers buffer-size accounting while
// reading (initial size, decrement on receive / increment on read, forced
// partial batch when the buffer is full) and full batch decoding, including
// gzip-compressed batches and message metadata handling.
func TestTopicStreamReaderImpl_ReadMessages(t *testing.T) {
	t.Run("BufferSize", func(t *testing.T) {
		// Block until restBufferSizeBytes changes away from old.
		waitChangeRestBufferSizeBytes := func(r *topicStreamReaderImpl, old int64) {
			xtest.SpinWaitCondition(t, nil, func() bool {
				return r.restBufferSizeBytes.Load() != old
			})
		}

		xtest.TestManyTimesWithName(t, "InitialBufferSize", func(t testing.TB) {
			e := newTopicReaderTestEnv(t)
			e.Start()
			waitChangeRestBufferSizeBytes(e.reader, 0)
			require.Equal(t, e.initialBufferSizeBytes, e.reader.restBufferSizeBytes.Load())
		})

		xtest.TestManyTimesWithName(t, "DecrementIncrementBufferSize", func(t testing.TB) {
			e := newTopicReaderTestEnv(t)

			// doesn't check sends
			e.stream.EXPECT().Send(gomock.Any()).Return(nil).MinTimes(1)

			e.Start()
			waitChangeRestBufferSizeBytes(e.reader, 0)

			const dataSize = 1000
			e.SendFromServer(&rawtopicreader.ReadResponse{BytesSize: dataSize, PartitionData: []rawtopicreader.PartitionData{
				{
					PartitionSessionID: e.partitionSessionID,
					Batches: []rawtopicreader.Batch{
						{
							MessageData: []rawtopicreader.MessageData{
								{
									Offset: 1,
									SeqNo:  1,
									Data:   []byte{1, 2},
								},
								{
									Offset: 2,
									SeqNo:  2,
									Data:   []byte{4, 5, 6},
								},
								{
									Offset: 3,
									SeqNo:  3,
									Data:   []byte{7},
								},
							},
						},
					},
				},
			}})
			// Receiving the server message must shrink the free buffer by
			// the full reported BytesSize.
			waitChangeRestBufferSizeBytes(e.reader, e.initialBufferSizeBytes)
			expectedBufferSizeAfterReceiveMessages := e.initialBufferSizeBytes - dataSize
			require.Equal(t, expectedBufferSizeAfterReceiveMessages, e.reader.restBufferSizeBytes.Load())

			// Reading one message returns part of the buffer.
			oneOption := newReadMessageBatchOptions()
			oneOption.MaxCount = 1
			_, err := e.reader.ReadMessageBatch(e.ctx, oneOption)
			require.NoError(t, err)

			waitChangeRestBufferSizeBytes(e.reader, expectedBufferSizeAfterReceiveMessages)

			bufferSizeAfterReadOneMessage := e.reader.restBufferSizeBytes.Load()

			// Reading the rest restores the full buffer.
			_, err = e.reader.ReadMessageBatch(e.ctx, newReadMessageBatchOptions())
			require.NoError(t, err)

			waitChangeRestBufferSizeBytes(e.reader, bufferSizeAfterReadOneMessage)
			require.Equal(t, e.initialBufferSizeBytes, e.reader.restBufferSizeBytes.Load())
		})

		xtest.TestManyTimesWithName(t, "ForceReturnBatchIfBufferFull", func(t testing.TB) {
			e := newTopicReaderTestEnv(t)

			dataRequested := make(empty.Chan)
			e.stream.EXPECT().Send(&rawtopicreader.ReadRequest{BytesSize: int(e.initialBufferSizeBytes)}).
				Do(func(_ interface{}) {
					close(dataRequested)
				})

			e.Start()
			waitChangeRestBufferSizeBytes(e.reader, 0)

			// One message consumes the entire buffer.
			e.SendFromServer(&rawtopicreader.ReadResponse{
				BytesSize: int(e.initialBufferSizeBytes),
				PartitionData: []rawtopicreader.PartitionData{
					{
						PartitionSessionID: e.partitionSessionID,
						Batches: []rawtopicreader.Batch{
							{
								MessageData: []rawtopicreader.MessageData{
									{
										Offset: 1,
										SeqNo:  1,
										Data:   []byte{1, 2, 3},
									},
								},
							},
						},
					},
				},
			})
			needReadTwoMessages := newReadMessageBatchOptions()
			needReadTwoMessages.MinCount = 2

			readTimeoutCtx, cancel := xcontext.WithTimeout(e.ctx, time.Second)
			defer cancel()

			// MinCount=2 cannot be satisfied while the buffer is full, so the
			// reader must return the partial one-message batch instead of
			// waiting forever.
			batch, err := e.reader.ReadMessageBatch(readTimeoutCtx, needReadTwoMessages)
			require.NoError(t, err)
			require.Len(t, batch.Messages, 1)

			<-dataRequested
		})
	})

	xtest.TestManyTimesWithName(t, "ReadBatch", func(t testing.TB) {
		e := newTopicReaderTestEnv(t)
		e.Start()

		// gzip-compress msg for the CodecGzip batches below.
		compress := func(msg string) []byte {
			b := &bytes.Buffer{}
			writer := gzip.NewWriter(b)
			_, err := writer.Write([]byte(msg))
			require.NoError(t, writer.Close())
			require.NoError(t, err)

			return b.Bytes()
		}

		prevOffset := e.partitionSession.lastReceivedMessageOffset()

		sendDataRequestCompleted := make(empty.Chan)
		dataSize := 6
		e.stream.EXPECT().Send(&rawtopicreader.ReadRequest{BytesSize: dataSize}).Do(func(_ interface{}) {
			close(sendDataRequestCompleted)
		})
		// One response with three batches: raw, gzip, and raw with metadata
		// (including a duplicated metadata key where the last value wins).
		e.SendFromServer(&rawtopicreader.ReadResponse{
			BytesSize: dataSize,
			PartitionData: []rawtopicreader.PartitionData{
				{
					PartitionSessionID: e.partitionSessionID,
					Batches: []rawtopicreader.Batch{
						{
							Codec:            rawtopiccommon.CodecRaw,
							WriteSessionMeta: map[string]string{"a": "b", "c": "d"},
							WrittenAt:        testTime(5),
							MessageData: []rawtopicreader.MessageData{
								{
									Offset:           prevOffset + 1,
									SeqNo:            1,
									CreatedAt:        testTime(1),
									Data:             []byte("123"),
									UncompressedSize: 3,
									MessageGroupID:   "1",
								},
								{
									Offset:           prevOffset + 2,
									SeqNo:            2,
									CreatedAt:        testTime(2),
									Data:             []byte("4567"),
									UncompressedSize: 4,
									MessageGroupID:   "1",
								},
							},
						},
						{
							Codec:            rawtopiccommon.CodecGzip,
							WriteSessionMeta: map[string]string{"e": "f", "g": "h"},
							WrittenAt:        testTime(6),
							MessageData: []rawtopicreader.MessageData{
								{
									Offset:           prevOffset + 10,
									SeqNo:            3,
									CreatedAt:        testTime(3),
									Data:             compress("098"),
									UncompressedSize: 3,
									MessageGroupID:   "2",
								},
								{
									Offset:           prevOffset + 20,
									SeqNo:            4,
									CreatedAt:        testTime(4),
									Data:             compress("0987"),
									UncompressedSize: 4,
									MessageGroupID:   "2",
								},
							},
						},
						{
							Codec:            rawtopiccommon.CodecRaw,
							WriteSessionMeta: map[string]string{"a": "b", "c": "d"},
							WrittenAt:        testTime(7),
							MessageData: []rawtopicreader.MessageData{
								{
									Offset:           prevOffset + 30,
									SeqNo:            5,
									CreatedAt:        testTime(5),
									Data:             []byte("test"),
									UncompressedSize: 4,
									MessageGroupID:   "1",
									MetadataItems: []rawtopiccommon.MetadataItem{
										{
											Key:   "first",
											Value: []byte("first-value"),
										},
										{
											Key:   "second",
											Value: []byte("second-value"),
										},
									},
								},
								{
									Offset:           prevOffset + 31,
									SeqNo:            6,
									CreatedAt:        testTime(5),
									Data:             []byte("4567"),
									UncompressedSize: 4,
									MessageGroupID:   "1",
									MetadataItems: []rawtopiccommon.MetadataItem{
										{
											Key:   "doubled-key",
											Value: []byte("bad"),
										},
										{
											Key:   "doubled-key",
											Value: []byte("good"),
										},
									},
								},
							},
						},
					},
				},
			},
		},
		)

		// Expected decoded payloads and the full expected batch structure,
		// including per-message commit ranges that cover the offset gaps.
		expectedData := [][]byte{[]byte("123"), []byte("4567"), []byte("098"), []byte("0987"), []byte("test"), []byte("4567")}
		expectedBatch := &PublicBatch{
			commitRange: commitRange{
				commitOffsetStart: prevOffset + 1,
				commitOffsetEnd:   prevOffset + 32,
				partitionSession:  e.partitionSession,
			},
			Messages: []*PublicMessage{
				{
					SeqNo:                1,
					CreatedAt:            testTime(1),
					MessageGroupID:       "1",
					Offset:               prevOffset.ToInt64() + 1,
					WrittenAt:            testTime(5),
					WriteSessionMetadata: map[string]string{"a": "b", "c": "d"},
					UncompressedSize:     3,
					rawDataLen:           3,
					commitRange: commitRange{
						commitOffsetStart: prevOffset + 1,
						commitOffsetEnd:   prevOffset + 2,
						partitionSession:  e.partitionSession,
					},
				},
				{
					SeqNo:                2,
					CreatedAt:            testTime(2),
					MessageGroupID:       "1",
					Offset:               prevOffset.ToInt64() + 2,
					WrittenAt:            testTime(5),
					WriteSessionMetadata: map[string]string{"a": "b", "c": "d"},
					rawDataLen:           4,
					UncompressedSize:     4,
					commitRange: commitRange{
						commitOffsetStart: prevOffset + 2,
						commitOffsetEnd:   prevOffset + 3,
						partitionSession:  e.partitionSession,
					},
				},
				{
					SeqNo:                3,
					CreatedAt:            testTime(3),
					MessageGroupID:       "2",
					Offset:               prevOffset.ToInt64() + 10,
					WrittenAt:            testTime(6),
					WriteSessionMetadata: map[string]string{"e": "f", "g": "h"},
					rawDataLen:           len(compress("098")),
					UncompressedSize:     3,
					commitRange: commitRange{
						commitOffsetStart: prevOffset + 3,
						commitOffsetEnd:   prevOffset + 11,
						partitionSession:  e.partitionSession,
					},
				},
				{
					SeqNo:                4,
					CreatedAt:            testTime(4),
					MessageGroupID:       "2",
					Offset:               prevOffset.ToInt64() + 20,
					WrittenAt:            testTime(6),
					WriteSessionMetadata: map[string]string{"e": "f", "g": "h"},
					rawDataLen:           len(compress("0987")),
					UncompressedSize:     4,
					commitRange: commitRange{
						commitOffsetStart: prevOffset + 11,
						commitOffsetEnd:   prevOffset + 21,
						partitionSession:  e.partitionSession,
					},
				},
				{
					SeqNo:          5,
					CreatedAt:      testTime(5),
					MessageGroupID: "1",
					Metadata: map[string][]byte{
						"first":  []byte("first-value"),
						"second": []byte("second-value"),
					},
					Offset:               prevOffset.ToInt64() + 30,
					WrittenAt:            testTime(7),
					WriteSessionMetadata: map[string]string{"a": "b", "c": "d"},
					UncompressedSize:     4,
					rawDataLen:           4,
					commitRange: commitRange{
						commitOffsetStart: prevOffset + 21,
						commitOffsetEnd:   prevOffset + 31,
						partitionSession:  e.partitionSession,
					},
				},
				{
					SeqNo:          6,
					CreatedAt:      testTime(5),
					MessageGroupID: "1",
					Metadata: map[string][]byte{
						"doubled-key": []byte("good"),
					},
					Offset:               prevOffset.ToInt64() + 31,
					WrittenAt:            testTime(7),
					WriteSessionMetadata: map[string]string{"a": "b", "c": "d"},
					UncompressedSize:     4,
					rawDataLen:           4,
					commitRange: commitRange{
						commitOffsetStart: prevOffset + 31,
						commitOffsetEnd:   prevOffset + 32,
						partitionSession:  e.partitionSession,
					},
				},
			},
		}

		opts := newReadMessageBatchOptions()
		opts.MinCount = 6
		batch, err := e.reader.ReadMessageBatch(e.ctx, opts)
		require.NoError(t, err)

		// Drain each message's data reader, then zero out the fields that
		// cannot be compared directly (the one-time reader and the buffer
		// accounting) before comparing against expectedBatch.
		data := make([][]byte, 0, len(batch.Messages))
		for i := range batch.Messages {
			content, err := io.ReadAll(&batch.Messages[i].data)
			require.NoError(t, err)
			data = append(data, content)
			batch.Messages[i].data = newOneTimeReader(nil)
			batch.Messages[i].bufferBytesAccount = 0
		}

		require.Equal(t, expectedData, data)
		require.Equal(t, expectedBatch, batch)
		<-sendDataRequestCompleted
	})
}
   821  
// TestTopicStreamReadImpl_BatchReaderWantMoreMessagesThenBufferCanHold checks that
// ReadMessageBatch returns the already-buffered messages (instead of blocking
// forever) when MinCount cannot be satisfied because a single server message
// filled the whole read buffer, and that the freed buffer size is requested
// back from the server afterwards.
func TestTopicStreamReadImpl_BatchReaderWantMoreMessagesThenBufferCanHold(t *testing.T) {
	// sendMessageWithFullBuffer pushes one message whose declared BytesSize
	// consumes the entire buffer budget and returns a channel that is closed
	// when the reader asks the (mock) server for that buffer size back.
	sendMessageWithFullBuffer := func(e *streamEnv) empty.Chan {
		nextDataRequested := make(empty.Chan)
		e.stream.EXPECT().Send(&rawtopicreader.ReadRequest{BytesSize: int(e.initialBufferSizeBytes)}).Do(func(_ interface{}) {
			close(nextDataRequested)
		})

		e.SendFromServer(
			&rawtopicreader.ReadResponse{
				BytesSize: int(e.initialBufferSizeBytes),
				PartitionData: []rawtopicreader.PartitionData{
					{
						PartitionSessionID: e.partitionSessionID,
						Batches: []rawtopicreader.Batch{
							{
								Codec: rawtopiccommon.CodecRaw,
								MessageData: []rawtopicreader.MessageData{
									{
										Offset: 1,
									},
								},
							},
						},
					},
				},
			})

		return nextDataRequested
	}

	xtest.TestManyTimesWithName(t, "ReadAfterMessageInBuffer", func(t testing.TB) {
		e := newTopicReaderTestEnv(t)
		e.Start()

		nextDataRequested := sendMessageWithFullBuffer(&e)

		// wait message received to internal buffer
		xtest.SpinWaitCondition(t, &e.reader.batcher.m, func() bool {
			return len(e.reader.batcher.messages) > 0
		})

		// the queued message must have consumed the whole buffer budget
		xtest.SpinWaitCondition(t, nil, func() bool {
			return e.reader.restBufferSizeBytes.Load() == 0
		})

		// ask for more messages than can ever arrive with a full buffer
		opts := newReadMessageBatchOptions()
		opts.MinCount = 2

		readCtx, cancel := xcontext.WithTimeout(e.ctx, time.Second)
		defer cancel()
		batch, err := e.reader.ReadMessageBatch(readCtx, opts)
		require.NoError(t, err)
		require.Len(t, batch.Messages, 1)
		require.Equal(t, int64(1), batch.Messages[0].Offset)

		// after the batch is handed out, the freed bytes must be re-requested
		<-nextDataRequested
		require.Equal(t, e.initialBufferSizeBytes, e.reader.restBufferSizeBytes.Load())
	})

	xtest.TestManyTimesWithName(t, "ReadBeforeMessageInBuffer", func(t testing.TB) {
		e := newTopicReaderTestEnv(t)
		e.Start()

		// start the read before any message exists in the buffer
		readCompleted := make(empty.Chan)
		var batch *PublicBatch
		var readErr error
		go func() {
			defer close(readCompleted)

			opts := newReadMessageBatchOptions()
			opts.MinCount = 2

			readCtx, cancel := xcontext.WithTimeout(e.ctx, time.Second)
			defer cancel()
			batch, readErr = e.reader.ReadMessageBatch(readCtx, opts)
		}()

		// wait to start pop
		e.reader.batcher.notifyAboutNewMessages()
		xtest.SpinWaitCondition(t, &e.reader.batcher.m, func() bool {
			return len(e.reader.batcher.hasNewMessages) == 0
		})

		nextDataRequested := sendMessageWithFullBuffer(&e)

		<-readCompleted
		require.NoError(t, readErr)
		require.Len(t, batch.Messages, 1)
		require.Equal(t, int64(1), batch.Messages[0].Offset)

		<-nextDataRequested
		require.Equal(t, e.initialBufferSizeBytes, e.reader.restBufferSizeBytes.Load())
	})
}
   916  
   917  func TestTopicStreamReadImpl_CommitWithBadSession(t *testing.T) {
   918  	commitByMode := func(mode PublicCommitMode) error {
   919  		sleep := func() {
   920  			time.Sleep(time.Second / 10)
   921  		}
   922  		e := newTopicReaderTestEnv(t)
   923  		e.reader.cfg.CommitMode = mode
   924  		e.Start()
   925  
   926  		cr := commitRange{
   927  			partitionSession: newPartitionSession(
   928  				context.Background(),
   929  				"asd",
   930  				123,
   931  				nextReaderID(),
   932  				"bad-connection-id",
   933  				222,
   934  				213,
   935  			),
   936  		}
   937  		commitErr := e.reader.Commit(e.ctx, cr)
   938  
   939  		sleep()
   940  
   941  		require.False(t, e.reader.closed)
   942  
   943  		return commitErr
   944  	}
   945  	t.Run("CommitModeNone", func(t *testing.T) {
   946  		require.ErrorIs(t, commitByMode(CommitModeNone), ErrCommitDisabled)
   947  	})
   948  	t.Run("CommitModeSync", func(t *testing.T) {
   949  		require.ErrorIs(t, commitByMode(CommitModeSync), PublicErrCommitSessionToExpiredSession)
   950  	})
   951  	t.Run("CommitModeAsync", func(t *testing.T) {
   952  		require.NoError(t, commitByMode(CommitModeAsync))
   953  	})
   954  }
   955  
// streamEnv bundles a topicStreamReaderImpl under test with its mocked raw
// stream and the channel used to feed server messages into the mock's Recv.
type streamEnv struct {
	ctx                    context.Context
	t                      testing.TB
	reader                 *topicStreamReaderImpl
	stopReadEvents         empty.Chan // closed in cleanup to make receiveMessageHandler return an error
	stream                 *MockRawTopicReaderStream
	partitionSessionID     partitionSessionID
	mc                     *gomock.Controller
	partitionSession       *partitionSession
	initialBufferSizeBytes int64

	m                          xsync.Mutex // guards nextMessageNeedCallback
	messagesFromServerToClient chan testStreamResult
	nextMessageNeedCallback    func() // invoked at the start of the next Recv call, then cleared
}
   971  
// testStreamResult is one item pushed through messagesFromServerToClient:
// either a server message (optionally with an error) or a wait-only marker.
type testStreamResult struct {
	nextMessageCallback func() // stored as nextMessageNeedCallback for the following Recv call
	msg                 rawtopicreader.ServerMessage
	err                 error
	waitOnly            bool // if set, Recv consumes the marker and keeps waiting for a real message
}
   978  
// newTopicReaderTestEnv builds a stopped topicStreamReaderImpl wired to a
// gomock stream with one partition session pre-registered. Cleanup handlers
// (via t.Cleanup) stop the mock Recv loop, close the reader, wait for
// CloseSend and fail the test if queued server messages were left unread.
func newTopicReaderTestEnv(t testing.TB) streamEnv {
	ctx := xtest.Context(t)

	mc := gomock.NewController(t)

	stream := NewMockRawTopicReaderStream(mc)

	const initialBufferSizeBytes = 1000000

	cfg := newTopicStreamReaderConfig()
	cfg.BaseContext = ctx
	cfg.BufferSizeProtoBytes = initialBufferSizeBytes
	cfg.CommitterBatchTimeLag = 0

	reader := newTopicStreamReaderStopped(nextReaderID(), stream, cfg)
	// reader.initSession() - skip stream level initialization

	const testPartitionID = 5
	const testSessionID = 15
	const testSessionComitted = 20

	session := newPartitionSession(
		ctx,
		"/test",
		testPartitionID,
		reader.readerID,
		reader.readConnectionID,
		testSessionID,
		testSessionComitted,
	)
	require.NoError(t, reader.sessionController.Add(session))

	env := streamEnv{
		ctx:                        ctx,
		t:                          t,
		initialBufferSizeBytes:     initialBufferSizeBytes,
		reader:                     reader,
		stopReadEvents:             make(empty.Chan),
		stream:                     stream,
		messagesFromServerToClient: make(chan testStreamResult),
		partitionSession:           session,
		partitionSessionID:         session.partitionSessionID,
		mc:                         mc,
	}

	// every Recv is served from messagesFromServerToClient (see receiveMessageHandler)
	stream.EXPECT().Recv().AnyTimes().DoAndReturn(env.receiveMessageHandler)

	// initial data request, sent once when the read loop starts
	stream.EXPECT().Send(&rawtopicreader.ReadRequest{BytesSize: initialBufferSizeBytes}).MaxTimes(1)

	// allow in test send data without explicit sizes
	stream.EXPECT().Send(&rawtopicreader.ReadRequest{BytesSize: 0}).AnyTimes()

	streamClosed := make(empty.Chan)
	stream.EXPECT().CloseSend().Return(nil).Do(func() {
		close(streamClosed)
	})

	t.Cleanup(func() {
		cleanupTimeout, cancel := xcontext.WithTimeout(context.Background(), time.Second)
		defer cancel()

		close(env.stopReadEvents)
		_ = env.reader.CloseWithError(ctx, errors.New("test finished"))
		require.NoError(t, cleanupTimeout.Err())
		xtest.WaitChannelClosed(t, streamClosed)
	})

	t.Cleanup(func() {
		// fail if the test left unread messages queued for the reader
		if messLen := len(env.messagesFromServerToClient); messLen != 0 {
			t.Fatalf("not all messages consumed from server: %v", messLen)
		}
	})

	//nolint:govet
	return env
}
  1056  
  1057  func (e *streamEnv) Start() {
  1058  	require.NoError(e.t, e.reader.startLoops())
  1059  	xtest.SpinWaitCondition(e.t, nil, func() bool {
  1060  		return e.reader.restBufferSizeBytes.Load() == e.initialBufferSizeBytes
  1061  	})
  1062  }
  1063  
  1064  func (e *streamEnv) readerReceiveWaitClose(callback func()) {
  1065  	e.stream.EXPECT().Recv().Do(func() {
  1066  		if callback != nil {
  1067  			callback()
  1068  		}
  1069  		<-e.ctx.Done()
  1070  	}).Return(nil, errors.New("test reader closed"))
  1071  }
  1072  
// SendFromServer queues msg for delivery to the reader without registering a
// callback for the next Recv call.
func (e *streamEnv) SendFromServer(msg rawtopicreader.ServerMessage) {
	e.SendFromServerAndSetNextCallback(msg, nil)
}
  1076  
  1077  func (e *streamEnv) SendFromServerAndSetNextCallback(msg rawtopicreader.ServerMessage, callback func()) {
  1078  	if msg.StatusData().Status == 0 {
  1079  		msg.SetStatus(rawydb.StatusSuccess)
  1080  	}
  1081  	e.messagesFromServerToClient <- testStreamResult{msg: msg, nextMessageCallback: callback}
  1082  }
  1083  
// WaitMessageReceived blocks until the mock Recv loop consumes this wait-only
// marker, i.e. until every message queued before this call has been read.
func (e *streamEnv) WaitMessageReceived() {
	e.messagesFromServerToClient <- testStreamResult{waitOnly: true}
}
  1087  
  1088  func (e *streamEnv) receiveMessageHandler() (rawtopicreader.ServerMessage, error) {
  1089  	if e.ctx.Err() != nil {
  1090  		return nil, e.ctx.Err()
  1091  	}
  1092  
  1093  	var callback func()
  1094  	e.m.WithLock(func() {
  1095  		callback = e.nextMessageNeedCallback
  1096  		e.nextMessageNeedCallback = nil
  1097  	})
  1098  
  1099  	if callback != nil {
  1100  		callback()
  1101  	}
  1102  
  1103  readMessages:
  1104  	for {
  1105  		select {
  1106  		case <-e.ctx.Done():
  1107  			return nil, e.ctx.Err()
  1108  		case <-e.stopReadEvents:
  1109  			return nil, xerrors.Wrap(errors.New("mock reader closed"))
  1110  		case res := <-e.messagesFromServerToClient:
  1111  			if res.waitOnly {
  1112  				continue readMessages
  1113  			}
  1114  			e.m.WithLock(func() {
  1115  				e.nextMessageNeedCallback = res.nextMessageCallback
  1116  			})
  1117  
  1118  			return res.msg, res.err
  1119  		}
  1120  	}
  1121  }