code.vegaprotocol.io/vega@v0.79.0/datanode/broker/buffered_event_source_test.go (about)

     1  // Copyright (C) 2023 Gobalsky Labs Limited
     2  //
     3  // This program is free software: you can redistribute it and/or modify
     4  // it under the terms of the GNU Affero General Public License as
     5  // published by the Free Software Foundation, either version 3 of the
     6  // License, or (at your option) any later version.
     7  //
     8  // This program is distributed in the hope that it will be useful,
     9  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    10  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    11  // GNU Affero General Public License for more details.
    12  //
    13  // You should have received a copy of the GNU Affero General Public License
    14  // along with this program.  If not, see <http://www.gnu.org/licenses/>.
    15  
    16  package broker
    17  
    18  import (
    19  	"context"
    20  	"fmt"
    21  	"io/fs"
    22  	"io/ioutil"
    23  	"os"
    24  	"path/filepath"
    25  	"sort"
    26  	"strconv"
    27  	"strings"
    28  	"testing"
    29  	"time"
    30  
    31  	"code.vegaprotocol.io/vega/logging"
    32  
    33  	"github.com/stretchr/testify/assert"
    34  )
    35  
    36  func Test_RemoveOldArchiveFilesIfDirectoryFull(t *testing.T) {
    37  	path := t.TempDir()
    38  	file1, err := os.Create(filepath.Join(path, "datanode-buffer-2023-02-09-20-44-35-1675975475798831800-seqnumspan-1-1000000.gz"))
    39  	assert.NoError(t, err)
    40  	defer func() { _ = file1.Close() }()
    41  
    42  	for i := 0; i < 100; i++ {
    43  		file1.WriteString("A LOAD LOAD OF OLD OLD COBBLERS")
    44  	}
    45  
    46  	file2, err := os.Create(filepath.Join(path, "datanode-buffer-2023-02-09-20-44-41-1675975481217000775-seqnumspan-1000001-2000000.gz"))
    47  	assert.NoError(t, err)
    48  	defer func() { _ = file2.Close() }()
    49  
    50  	for i := 0; i < 100; i++ {
    51  		file2.WriteString("A LOAD LOAD OF OLD OLD COBBLERS")
    52  	}
    53  
    54  	file3, err := os.Create(filepath.Join(path, "datanode-buffer-2023-02-09-20-44-46-1675975486620295637-seqnumspan-2000001-3000000.gz"))
    55  	defer func() { _ = file3.Close() }()
    56  
    57  	assert.NoError(t, err)
    58  	for i := 0; i < 100; i++ {
    59  		file3.WriteString("A LOAD LOAD OF OLD OLD COBBLERS")
    60  	}
    61  
    62  	file4, err := os.Create(filepath.Join(path, "datanode-buffer-2023-02-09-20-45-02-1675975502197534094-seqnumspan-3000001-4000000.gz"))
    63  	defer func() { _ = file4.Close() }()
    64  	assert.NoError(t, err)
    65  	for i := 0; i < 100; i++ {
    66  		file4.WriteString("A LOAD LOAD OF OLD OLD COBBLERS")
    67  	}
    68  
    69  	var preCleanUpSize int64
    70  	err = filepath.Walk(path, func(path string, info fs.FileInfo, err error) error {
    71  		if err != nil || (info != nil && info.IsDir()) {
    72  			return nil //nolint:nilerr
    73  		}
    74  		preCleanUpSize += info.Size()
    75  		return nil
    76  	})
    77  	assert.NoError(t, err)
    78  
    79  	removeOldArchiveFilesIfDirectoryFull(path, preCleanUpSize/2+1)
    80  	var postRemoveFiles []fs.FileInfo
    81  	err = filepath.Walk(path, func(path string, info fs.FileInfo, err error) error {
    82  		if err != nil || (info != nil && info.IsDir()) {
    83  			return nil //nolint:nilerr
    84  		}
    85  		postRemoveFiles = append(postRemoveFiles, info)
    86  		return nil
    87  	})
    88  	assert.NoError(t, err)
    89  
    90  	sort.Slice(postRemoveFiles, func(i, j int) bool {
    91  		return strings.Compare(postRemoveFiles[i].Name(), postRemoveFiles[j].Name()) < 0
    92  	})
    93  
    94  	assert.Equal(t, 2, len(postRemoveFiles))
    95  	assert.Equal(t, "datanode-buffer-2023-02-09-20-44-46-1675975486620295637-seqnumspan-2000001-3000000.gz", postRemoveFiles[0].Name())
    96  	assert.Equal(t, "datanode-buffer-2023-02-09-20-45-02-1675975502197534094-seqnumspan-3000001-4000000.gz", postRemoveFiles[1].Name())
    97  }
    98  
    99  func Test_CompressUncompressedFilesInDir(t *testing.T) {
   100  	path := t.TempDir()
   101  	file1, err := os.Create(filepath.Join(path, "1"))
   102  	assert.NoError(t, err)
   103  	defer func() { _ = file1.Close() }()
   104  
   105  	for i := 0; i < 100; i++ {
   106  		file1.WriteString("A LOAD LOAD OF OLD OLD COBBLERS")
   107  	}
   108  
   109  	file2, err := os.Create(filepath.Join(path, "2"))
   110  	assert.NoError(t, err)
   111  	defer func() { _ = file2.Close() }()
   112  
   113  	for i := 0; i < 100; i++ {
   114  		file2.WriteString("A LOAD LOAD OF OLD OLD COBBLERS")
   115  	}
   116  
   117  	file3, err := os.Create(filepath.Join(path, "3.gz"))
   118  	defer func() { _ = file3.Close() }()
   119  
   120  	assert.NoError(t, err)
   121  	for i := 0; i < 100; i++ {
   122  		file3.WriteString("A LOAD LOAD OF OLD OLD COBBLERS")
   123  	}
   124  
   125  	file4, err := os.Create(filepath.Join(path, "4.gz"))
   126  	defer func() { _ = file4.Close() }()
   127  	assert.NoError(t, err)
   128  	for i := 0; i < 100; i++ {
   129  		file4.WriteString("A LOAD LOAD OF OLD OLD COBBLERS")
   130  	}
   131  
   132  	var preCompressFiles []fs.FileInfo
   133  	err = filepath.Walk(path, func(path string, info fs.FileInfo, err error) error {
   134  		if err != nil || (info != nil && info.IsDir()) {
   135  			return nil //nolint:nilerr
   136  		}
   137  		preCompressFiles = append(preCompressFiles, info)
   138  		return nil
   139  	})
   140  	sort.Slice(preCompressFiles, func(i, j int) bool {
   141  		return strings.Compare(preCompressFiles[i].Name(), preCompressFiles[j].Name()) < 0
   142  	})
   143  
   144  	assert.NoError(t, err)
   145  
   146  	compressUncompressedFilesInDir(path)
   147  
   148  	var postCompressFiles []fs.FileInfo
   149  	err = filepath.Walk(path, func(path string, info fs.FileInfo, err error) error {
   150  		if err != nil || (info != nil && info.IsDir()) {
   151  			return nil //nolint:nilerr
   152  		}
   153  		postCompressFiles = append(postCompressFiles, info)
   154  		return nil
   155  	})
   156  	assert.NoError(t, err)
   157  	sort.Slice(postCompressFiles, func(i, j int) bool {
   158  		return strings.Compare(postCompressFiles[i].Name(), postCompressFiles[j].Name()) < 0
   159  	})
   160  
   161  	assert.Equal(t, len(preCompressFiles), len(postCompressFiles))
   162  
   163  	assert.Equal(t, preCompressFiles[0].Name()+".gz", postCompressFiles[0].Name())
   164  	assert.Equal(t, preCompressFiles[1].Name()+".gz", postCompressFiles[1].Name())
   165  	assert.Equal(t, preCompressFiles[2].Name(), postCompressFiles[2].Name())
   166  	assert.Equal(t, preCompressFiles[3].Name(), postCompressFiles[3].Name())
   167  
   168  	assert.Greater(t, preCompressFiles[0].Size(), postCompressFiles[0].Size())
   169  	assert.Greater(t, preCompressFiles[1].Size(), postCompressFiles[1].Size())
   170  	assert.Equal(t, preCompressFiles[2].Size(), postCompressFiles[2].Size())
   171  	assert.Equal(t, preCompressFiles[3].Size(), postCompressFiles[3].Size())
   172  }
   173  
   174  func Test_FileBufferedEventSource_BufferingDisabledWhenEventsPerFileIsZero(t *testing.T) {
   175  	ctx, cancel := context.WithCancel(context.Background())
   176  	defer cancel()
   177  
   178  	path := t.TempDir()
   179  	archivePath := t.TempDir()
   180  
   181  	eventSource := &testRawEventSource{
   182  		eventsCh: make(chan []byte, 1000),
   183  		errCh:    make(chan error),
   184  	}
   185  
   186  	fb, err := NewBufferedEventSource(ctx, logging.NewTestLogger(), BufferedEventSourceConfig{
   187  		EventsPerFile:         0,
   188  		SendChannelBufferSize: 1000,
   189  	}, eventSource, path, archivePath)
   190  
   191  	assert.NoError(t, err)
   192  
   193  	evtCh, _ := fb.Receive(ctx)
   194  
   195  	numberOfEventsToSend := 100
   196  	for i := 0; i < numberOfEventsToSend; i++ {
   197  		a := []byte("TEST_EVENT_" + strconv.Itoa(i))
   198  		eventSource.eventsCh <- a
   199  	}
   200  
   201  	// This check consumes all events, and after each event buffer file is read it checks that it is removed
   202  	for i := 0; i < numberOfEventsToSend; i++ {
   203  		files, _ := os.ReadDir(path)
   204  		assert.Equal(t, 0, len(files))
   205  		e := <-evtCh
   206  		assert.Equal(t, fmt.Sprintf("TEST_EVENT_%d", i), string(e))
   207  	}
   208  }
   209  
   210  func Test_FileBufferedEventSource_ErrorSentOnPathError(t *testing.T) {
   211  	ctx, cancel := context.WithCancel(context.Background())
   212  	defer cancel()
   213  
   214  	eventSource := &testRawEventSource{
   215  		eventsCh: make(chan []byte),
   216  		errCh:    make(chan error),
   217  	}
   218  
   219  	fb, err := NewBufferedEventSource(ctx, logging.NewTestLogger(), BufferedEventSourceConfig{
   220  		EventsPerFile:         10,
   221  		SendChannelBufferSize: 0,
   222  	}, eventSource, "thepaththatdoesntexist", "")
   223  
   224  	assert.NoError(t, err)
   225  
   226  	_, errCh := fb.Receive(context.Background())
   227  
   228  	eventSource.errCh <- fmt.Errorf("test error")
   229  
   230  	assert.NotNil(t, <-errCh)
   231  }
   232  
   233  func Test_FileBufferedEventSource_ErrorsArePassedThrough(t *testing.T) {
   234  	ctx, cancel := context.WithCancel(context.Background())
   235  	defer cancel()
   236  
   237  	path := t.TempDir()
   238  	archivePath := t.TempDir()
   239  
   240  	eventSource := &testRawEventSource{
   241  		eventsCh: make(chan []byte),
   242  		errCh:    make(chan error),
   243  	}
   244  
   245  	fb, err := NewBufferedEventSource(ctx, logging.NewTestLogger(), BufferedEventSourceConfig{
   246  		EventsPerFile:         10,
   247  		SendChannelBufferSize: 0,
   248  	}, eventSource, path, archivePath)
   249  
   250  	assert.NoError(t, err)
   251  
   252  	_, errCh := fb.Receive(context.Background())
   253  
   254  	eventSource.errCh <- fmt.Errorf("test error")
   255  
   256  	assert.NotNil(t, <-errCh)
   257  }
   258  
   259  func Test_FileBufferedEventSource_EventsAreBufferedAndPassedThrough(t *testing.T) {
   260  	ctx, cancel := context.WithCancel(context.Background())
   261  	defer cancel()
   262  
   263  	path := t.TempDir()
   264  	archivePath := t.TempDir()
   265  
   266  	eventSource := &testRawEventSource{
   267  		eventsCh: make(chan []byte),
   268  		errCh:    make(chan error),
   269  	}
   270  
   271  	fb, err := NewBufferedEventSource(ctx, logging.NewTestLogger(), BufferedEventSourceConfig{
   272  		EventsPerFile:         10,
   273  		SendChannelBufferSize: 0,
   274  	}, eventSource, path, archivePath)
   275  
   276  	assert.NoError(t, err)
   277  
   278  	evtCh, _ := fb.Receive(context.Background())
   279  
   280  	eventSource.eventsCh <- []byte("TEST_EVENT_1")
   281  	eventSource.eventsCh <- []byte("TEST_EVENT_2")
   282  	eventSource.eventsCh <- []byte("TEST_EVENT_3")
   283  
   284  	r1 := <-evtCh
   285  	r2 := <-evtCh
   286  	r3 := <-evtCh
   287  
   288  	assert.Equal(t, []byte("TEST_EVENT_1"), r1)
   289  	assert.Equal(t, []byte("TEST_EVENT_2"), r2)
   290  	assert.Equal(t, []byte("TEST_EVENT_3"), r3)
   291  }
   292  
   293  func Test_FileBufferedEventSource_RollsBufferFiles(t *testing.T) {
   294  	ctx, cancel := context.WithCancel(context.Background())
   295  	defer cancel()
   296  
   297  	path := t.TempDir()
   298  	archivePath := t.TempDir()
   299  
   300  	eventSource := &testRawEventSource{
   301  		eventsCh: make(chan []byte),
   302  		errCh:    make(chan error),
   303  	}
   304  
   305  	eventsPerFile := 10
   306  	fb, err := NewBufferedEventSource(ctx, logging.NewTestLogger(), BufferedEventSourceConfig{
   307  		EventsPerFile:         eventsPerFile,
   308  		SendChannelBufferSize: 0,
   309  	}, eventSource, path, archivePath)
   310  
   311  	assert.NoError(t, err)
   312  
   313  	evtCh, _ := fb.Receive(ctx)
   314  
   315  	numberOfEventsToSend := 100
   316  	for i := 0; i < numberOfEventsToSend; i++ {
   317  		eventSource.eventsCh <- []byte("TEST_EVENT_" + strconv.Itoa(i))
   318  	}
   319  
   320  	// This check consumes all events, and after each event buffer file is read it checks that it is removed
   321  	for i := 0; i < numberOfEventsToSend; i++ {
   322  		if i%eventsPerFile == 0 {
   323  			files, _ := ioutil.ReadDir(path)
   324  			expectedNumFiles := (numberOfEventsToSend - i) / eventsPerFile
   325  
   326  			// As it interacts with disk, there is a bit of asynchronicity, this loop is to ensure that the directory
   327  			// has chance to update. It will timeout if this test fails
   328  			for expectedNumFiles != len(files) {
   329  				files, _ = ioutil.ReadDir(path)
   330  				time.Sleep(5 * time.Millisecond)
   331  			}
   332  
   333  			sort.Slice(files, func(i int, j int) bool {
   334  				return files[i].ModTime().Before(files[j].ModTime())
   335  			})
   336  			for j, f := range files {
   337  				expectedFilename := fmt.Sprintf("datanode-buffer-%d-%d.bevt", (j+i/eventsPerFile)*eventsPerFile+1, (j+1+i/eventsPerFile)*eventsPerFile)
   338  				assert.Equal(t, expectedFilename, f.Name())
   339  			}
   340  		}
   341  
   342  		e := <-evtCh
   343  		assert.Equal(t, []byte("TEST_EVENT_"+strconv.Itoa(i)), e)
   344  	}
   345  }
   346  
// testRawEventSource is a stub raw event source whose channels the tests
// drive directly to simulate incoming events and errors.
type testRawEventSource struct {
	eventsCh chan []byte // serialized events the test pushes in
	errCh    chan error  // errors the test pushes in, expected to pass through
}
   351  
// Listen satisfies the raw event source interface; the stub has nothing to
// start, so it always reports success.
func (t *testRawEventSource) Listen() error {
	return nil
}
   355  
   356  func (t *testRawEventSource) Receive(ctx context.Context) (<-chan []byte, <-chan error) {
   357  	return t.eventsCh, t.errCh
   358  }