github.com/ari-anchor/sei-tendermint@v0.0.0-20230519144642-dc826b7b56bb/internal/libs/autofile/group_test.go

package autofile

import (
	"context"
	"io"
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/ari-anchor/sei-tendermint/libs/log"
	tmos "github.com/ari-anchor/sei-tendermint/libs/os"
	tmrand "github.com/ari-anchor/sei-tendermint/libs/rand"
)

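// createTestGroupWithHeadSizeLimit opens a Group under a fresh "_test_"-prefixed
// directory with the given head-size limit. Callers clean up via destroyTestGroup.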
func createTestGroupWithHeadSizeLimit(ctx context.Context, t *testing.T, logger log.Logger, headSizeLimit int64) *Group {
	t.Helper()

	testID := tmrand.Str(12)
	testDir := "_test_" + testID
	err := tmos.EnsureDir(testDir, 0700)
	require.NoError(t, err, "Error creating dir")

	headPath := testDir + "/myfile"
	g, err := OpenGroup(ctx, logger, headPath, GroupHeadSizeLimit(headSizeLimit))
	require.NoError(t, err, "Error opening Group")
	require.NotNil(t, g, "Failed to create Group")

	return g
}

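// destroyTestGroup closes the Group and removes its test directory.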
func destroyTestGroup(t *testing.T, g *Group) {
	t.Helper()

	g.Close()

	err := os.RemoveAll(g.Dir)
	require.NoError(t, err, "Error removing test Group directory")
}

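// assertGroupInfo asserts that a GroupInfo reports the expected min/max indexes,
// total size, and head size.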
func assertGroupInfo(t *testing.T, gInfo GroupInfo, minIndex, maxIndex int, totalSize, headSize int64) {
	t.Helper()

	assert.Equal(t, minIndex, gInfo.MinIndex)
	assert.Equal(t, maxIndex, gInfo.MaxIndex)
	assert.Equal(t, totalSize, gInfo.TotalSize)
	assert.Equal(t, headSize, gInfo.HeadSize)
}

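// TestCheckHeadSizeLimit verifies that checkHeadSizeLimit rotates the head file
// only once the configured head-size limit (1,000,000 bytes here) is reached.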
func TestCheckHeadSizeLimit(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	logger := log.NewNopLogger()

	g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 1000*1000)

	// At first, there are no files.
	assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 0, 0)

	// Write 1000 bytes 999 times.
	for i := 0; i < 999; i++ {
		err := g.WriteLine(tmrand.Str(999))
		require.NoError(t, err, "Error appending to head")
	}
	err := g.FlushAndSync()
	require.NoError(t, err)
	assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000)

	// Even calling checkHeadSizeLimit manually won't rotate it.
	g.checkHeadSizeLimit(ctx)
	assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000)

	// Write 1000 more bytes.
	err = g.WriteLine(tmrand.Str(999))
	require.NoError(t, err, "Error appending to head")
	err = g.FlushAndSync()
	require.NoError(t, err)

	// Calling checkHeadSizeLimit this time rolls it.
	g.checkHeadSizeLimit(ctx)
	assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1000000, 0)

	// Write 1000 more bytes.
	err = g.WriteLine(tmrand.Str(999))
	require.NoError(t, err, "Error appending to head")
	err = g.FlushAndSync()
	require.NoError(t, err)

	// Calling checkHeadSizeLimit does nothing.
	g.checkHeadSizeLimit(ctx)
	assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1001000, 1000)

	// Write 1000 bytes 999 times.
	for i := 0; i < 999; i++ {
		err = g.WriteLine(tmrand.Str(999))
		require.NoError(t, err, "Error appending to head")
	}
	err = g.FlushAndSync()
	require.NoError(t, err)
	assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2000000, 1000000)

	// Calling checkHeadSizeLimit rolls it again.
	g.checkHeadSizeLimit(ctx)
	assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2000000, 0)

	// Write 1000 more bytes.
	_, err = g.Head.Write([]byte(tmrand.Str(999) + "\n"))
	require.NoError(t, err, "Error appending to head")
	err = g.FlushAndSync()
	require.NoError(t, err)
	assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000)

	// Calling checkHeadSizeLimit does nothing.
	g.checkHeadSizeLimit(ctx)
	assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000)

	// Cleanup
	destroyTestGroup(t, g)
}

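// TestRotateFile verifies that rotateFile moves the current head contents to an
// indexed file (g.Head.Path + ".000") and that the Group keeps absolute paths,
// so later writes are unaffected by a change of working directory.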
func TestRotateFile(t *testing.T) {
	logger := log.NewNopLogger()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0)

	// Create a different temporary directory and move into it, to make sure
	// relative paths were resolved at Group creation.
	origDir, err := os.Getwd()
	require.NoError(t, err)
	defer func() {
		if err := os.Chdir(origDir); err != nil {
			t.Error(err)
		}
	}()

	dir := t.TempDir()
	require.NoError(t, os.Chdir(dir))

	require.True(t, filepath.IsAbs(g.Head.Path))
	require.True(t, filepath.IsAbs(g.Dir))

	// Create and rotate files
	err = g.WriteLine("Line 1")
	require.NoError(t, err)
	err = g.WriteLine("Line 2")
	require.NoError(t, err)
	err = g.WriteLine("Line 3")
	require.NoError(t, err)
	err = g.FlushAndSync()
	require.NoError(t, err)
	g.rotateFile(ctx)
	err = g.WriteLine("Line 4")
	require.NoError(t, err)
	err = g.WriteLine("Line 5")
	require.NoError(t, err)
	err = g.WriteLine("Line 6")
	require.NoError(t, err)
	err = g.FlushAndSync()
	require.NoError(t, err)

	// Read g.Head.Path + ".000"
	body1, err := os.ReadFile(g.Head.Path + ".000")
	assert.NoError(t, err, "Failed to read first rolled file")
	if string(body1) != "Line 1\nLine 2\nLine 3\n" {
		t.Errorf("got unexpected contents: [%v]", string(body1))
	}

	// Read g.Head.Path
	body2, err := os.ReadFile(g.Head.Path)
	assert.NoError(t, err, "Failed to read head file")
	if string(body2) != "Line 4\nLine 5\nLine 6\n" {
		t.Errorf("got unexpected contents: [%v]", string(body2))
	}

	// Make sure there are no files in the current, temporary directory
	files, err := os.ReadDir(".")
	require.NoError(t, err)
	assert.Empty(t, files)

	// Cleanup
	destroyTestGroup(t, g)
}

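// TestWrite verifies that bytes written to the head can be read back through a
// reader created with NewReader(0).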
func TestWrite(t *testing.T) {
	logger := log.NewNopLogger()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0)

	written := []byte("Medusa")
	_, err := g.Write(written)
	require.NoError(t, err)
	err = g.FlushAndSync()
	require.NoError(t, err)

	read := make([]byte, len(written))
	gr, err := g.NewReader(0)
	require.NoError(t, err, "failed to create reader")

	_, err = gr.Read(read)
	assert.NoError(t, err, "failed to read data")
	assert.Equal(t, written, read)

	// Cleanup
	destroyTestGroup(t, g)
}

// TestGroupReaderRead tests that Read reads the required number of bytes from
// all the files in the group and returns no error if n == size of the given slice.
func TestGroupReaderRead(t *testing.T) {
	logger := log.NewNopLogger()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0)

	professor := []byte("Professor Monster")
	_, err := g.Write(professor)
	require.NoError(t, err)
	err = g.FlushAndSync()
	require.NoError(t, err)
	g.rotateFile(ctx)
	frankenstein := []byte("Frankenstein's Monster")
	_, err = g.Write(frankenstein)
	require.NoError(t, err)
	err = g.FlushAndSync()
	require.NoError(t, err)

	totalWrittenLength := len(professor) + len(frankenstein)
	read := make([]byte, totalWrittenLength)
	gr, err := g.NewReader(0)
	require.NoError(t, err, "failed to create reader")

	n, err := gr.Read(read)
	assert.NoError(t, err, "failed to read data")
	assert.Equal(t, totalWrittenLength, n, "not enough bytes read")
	professorPlusFrankenstein := professor
	professorPlusFrankenstein = append(professorPlusFrankenstein, frankenstein...)
	assert.Equal(t, professorPlusFrankenstein, read)

	// Cleanup
	destroyTestGroup(t, g)
}

// TestGroupReaderRead2 tests that Read returns io.EOF if the number of bytes
// read is less than the size of the given slice. A subsequent call should
// return 0, io.EOF.
func TestGroupReaderRead2(t *testing.T) {
	logger := log.NewNopLogger()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0)

	professor := []byte("Professor Monster")
	_, err := g.Write(professor)
	require.NoError(t, err)
	err = g.FlushAndSync()
	require.NoError(t, err)
	g.rotateFile(ctx)
	frankenstein := []byte("Frankenstein's Monster")
	frankensteinPart := []byte("Frankenstein")
	_, err = g.Write(frankensteinPart) // note writing only a part
	require.NoError(t, err)
	err = g.FlushAndSync()
	require.NoError(t, err)

	totalLength := len(professor) + len(frankenstein)
	read := make([]byte, totalLength)
	gr, err := g.NewReader(0)
	require.NoError(t, err, "failed to create reader")

	// 1) n < (size of the given slice), io.EOF
	n, err := gr.Read(read)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, len(professor)+len(frankensteinPart), n, "read more/fewer bytes than are in the group")

	// 2) 0, io.EOF
	n, err = gr.Read([]byte("0"))
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 0, n)

	// Cleanup
	destroyTestGroup(t, g)
}

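// TestMinIndex checks that MinIndex is zero for a freshly created Group.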
func TestMinIndex(t *testing.T) {
	logger := log.NewNopLogger()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0)

	assert.Zero(t, g.MinIndex(), "MinIndex should be zero at the beginning")

	// Cleanup
	destroyTestGroup(t, g)
}

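// TestMaxIndex checks that MaxIndex starts at zero and points to the newest
// file after a rotation.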
func TestMaxIndex(t *testing.T) {
	logger := log.NewNopLogger()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0)

	assert.Zero(t, g.MaxIndex(), "MaxIndex should be zero at the beginning")

	err := g.WriteLine("Line 1")
	require.NoError(t, err)
	err = g.FlushAndSync()
	require.NoError(t, err)
	g.rotateFile(ctx)

	assert.Equal(t, 1, g.MaxIndex(), "MaxIndex should point to the last file")

	// Cleanup
	destroyTestGroup(t, g)
}