github.com/cloudreve/Cloudreve/v3@v3.0.0-20240224133659-3edb00a6484c/pkg/filesystem/chunk/chunk_test.go

package chunk

import (
	"errors"
	"io"
	"os"
	"strings"
	"testing"

	"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/chunk/backoff"
	"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx"
	"github.com/stretchr/testify/assert"
)

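// TestNewChunkGroup covers how a file is split into chunks for a table of
// file/chunk size combinations: a zero chunk size falls back to a single
// chunk spanning the whole file, an empty file still yields one zero-length
// chunk, and a size that is not an exact multiple of the chunk size ends
// with a shorter final chunk.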
func TestNewChunkGroup(t *testing.T) {
	a := assert.New(t)

	testCases := []struct {
		fileSize               uint64
		chunkSize              uint64
		expectedInnerChunkSize uint64
		expectedChunkNum       uint64
		expectedInfo           [][2]int // start offset and length of each chunk
	}{
		{10, 0, 10, 1, [][2]int{{0, 10}}},
		{0, 0, 0, 1, [][2]int{{0, 0}}},
		{0, 10, 10, 1, [][2]int{{0, 0}}},
		{50, 10, 10, 5, [][2]int{
			{0, 10},
			{10, 10},
			{20, 10},
			{30, 10},
			{40, 10},
		}},
		{50, 50, 50, 1, [][2]int{
			{0, 50},
		}},
		{50, 15, 15, 4, [][2]int{
			{0, 15},
			{15, 15},
			{30, 15},
			{45, 5},
		}},
	}

	for index, testCase := range testCases {
		file := &fsctx.FileStream{Size: testCase.fileSize}
		chunkGroup := NewChunkGroup(file, testCase.chunkSize, &backoff.ConstantBackoff{}, true)
		a.EqualValues(testCase.expectedChunkNum, chunkGroup.Num(),
			"TestCase:%d,ChunkNum()", index)
		a.EqualValues(testCase.expectedInnerChunkSize, chunkGroup.chunkSize,
			"TestCase:%d,InnerChunkSize()", index)
		a.EqualValues(testCase.expectedChunkNum, chunkGroup.Num(),
			"TestCase:%d,len(Chunks)", index)
		a.EqualValues(testCase.fileSize, chunkGroup.Total())

		for cIndex, info := range testCase.expectedInfo {
			a.True(chunkGroup.Next())
			a.EqualValues(info[1], chunkGroup.Length(),
				"TestCase:%d,Chunks[%d].Length()", index, cIndex)
			a.EqualValues(info[0], chunkGroup.Start(),
				"TestCase:%d,Chunks[%d].Start()", index, cIndex)

			a.Equal(cIndex == len(testCase.expectedInfo)-1, chunkGroup.IsLast(),
				"TestCase:%d,Chunks[%d].IsLast()", index, cIndex)

			a.NotEmpty(chunkGroup.RangeHeader())
		}
		a.False(chunkGroup.Next())
	}
}

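// TestChunkGroup_TempAvailable verifies that TempAvailable only reports true
// once a buffer temp file has been attached and actually contains the
// chunk's data.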
func TestChunkGroup_TempAvailable(t *testing.T) {
	a := assert.New(t)

	file := &fsctx.FileStream{Size: 1}
	c := NewChunkGroup(file, 0, &backoff.ConstantBackoff{}, true)
	a.False(c.TempAvailable())

	f, err := os.CreateTemp("", "TestChunkGroup_TempAvailable.*")
	a.NoError(err)
	defer func() {
		f.Close()
		os.Remove(f.Name())
	}()
	c.bufferTemp = f

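	// An empty temp buffer is not enough: the chunk only becomes available
	// once the buffer holds the chunk's content (a single byte here).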
	a.False(c.TempAvailable())
	f.Write([]byte("1"))
	a.True(c.TempAvailable())
}

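// TestChunkGroup_Process exercises Process for a successful pass and for the
// retry paths: re-reading a chunk from the buffer temp file, rewinding through
// the stream's seeker, failing when the seek itself errors, and giving up once
// the backoff's retries are exhausted.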
func TestChunkGroup_Process(t *testing.T) {
	a := assert.New(t)
	file := &fsctx.FileStream{Size: 10}

	// success
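	// Both 5-byte chunks of the 10-byte stream are delivered in order and the
	// processor callback runs exactly once per chunk.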
	{
		file.File = io.NopCloser(strings.NewReader("1234567890"))
		c := NewChunkGroup(file, 5, &backoff.ConstantBackoff{}, true)
		count := 0
		a.True(c.Next())
		a.NoError(c.Process(func(c *ChunkGroup, chunk io.Reader) error {
			count++
			res, err := io.ReadAll(chunk)
			a.NoError(err)
			a.EqualValues("12345", string(res))
			return nil
		}))
		a.True(c.Next())
		a.NoError(c.Process(func(c *ChunkGroup, chunk io.Reader) error {
			count++
			res, err := io.ReadAll(chunk)
			a.NoError(err)
			a.EqualValues("67890", string(res))
			return nil
		}))
		a.False(c.Next())
		a.Equal(2, count)
	}

	// retry, read from buffer file
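	// The callback fails once on the second chunk; because the source reader
	// has already been consumed, the retry re-reads the same bytes from the
	// buffer temp file, so the callback runs three times in total.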
	{
		file.File = io.NopCloser(strings.NewReader("1234567890"))
		c := NewChunkGroup(file, 5, &backoff.ConstantBackoff{Max: 2}, true)
		count := 0
		a.True(c.Next())
		a.NoError(c.Process(func(c *ChunkGroup, chunk io.Reader) error {
			count++
			res, err := io.ReadAll(chunk)
			a.NoError(err)
			a.EqualValues("12345", string(res))
			return nil
		}))
		a.True(c.Next())
		a.NoError(c.Process(func(c *ChunkGroup, chunk io.Reader) error {
			count++
			res, err := io.ReadAll(chunk)
			a.NoError(err)
			a.EqualValues("67890", string(res))
			if count == 2 {
				return errors.New("error")
			}
			return nil
		}))
		a.False(c.Next())
		a.Equal(3, count)
	}

	// retry, read from seeker
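	// Same failure as above, but with buffering disabled; the retry rewinds
	// the underlying file through file.Seeker instead of a temp buffer.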
	{
		f, _ := os.CreateTemp("", "TestChunkGroup_Process.*")
		f.Write([]byte("1234567890"))
		f.Seek(0, io.SeekStart)
		defer func() {
			f.Close()
			os.Remove(f.Name())
		}()
		file.File = f
		file.Seeker = f
		c := NewChunkGroup(file, 5, &backoff.ConstantBackoff{Max: 2}, false)
		count := 0
		a.True(c.Next())
		a.NoError(c.Process(func(c *ChunkGroup, chunk io.Reader) error {
			count++
			res, err := io.ReadAll(chunk)
			a.NoError(err)
			a.EqualValues("12345", string(res))
			return nil
		}))
		a.True(c.Next())
		a.NoError(c.Process(func(c *ChunkGroup, chunk io.Reader) error {
			count++
			res, err := io.ReadAll(chunk)
			a.NoError(err)
			a.EqualValues("67890", string(res))
			if count == 2 {
				return errors.New("error")
			}
			return nil
		}))
		a.False(c.Next())
		a.Equal(3, count)
	}

	// retry, seek error
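	// The underlying file is closed before the second chunk is processed: the
	// callback's first attempt still runs and fails, but the retry cannot seek
	// back on the closed file, so Process returns an error with count left at 2.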
	{
		f, _ := os.CreateTemp("", "TestChunkGroup_Process.*")
		f.Write([]byte("1234567890"))
		f.Seek(0, io.SeekStart)
		defer func() {
			f.Close()
			os.Remove(f.Name())
		}()
		file.File = f
		file.Seeker = f
		c := NewChunkGroup(file, 5, &backoff.ConstantBackoff{Max: 2}, false)
		count := 0
		a.True(c.Next())
		a.NoError(c.Process(func(c *ChunkGroup, chunk io.Reader) error {
			count++
			res, err := io.ReadAll(chunk)
			a.NoError(err)
			a.EqualValues("12345", string(res))
			return nil
		}))
		a.True(c.Next())
		f.Close()
		a.Error(c.Process(func(c *ChunkGroup, chunk io.Reader) error {
			count++
			if count == 2 {
				return errors.New("error")
			}
			return nil
		}))
		a.False(c.Next())
		a.Equal(2, count)
	}

	// retry, finally error
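	// The callback keeps failing on the second chunk; once the backoff's two
	// retries are used up, Process returns the error, leaving one successful
	// call and three failed attempts (count == 4).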
	{
		f, _ := os.CreateTemp("", "TestChunkGroup_Process.*")
		f.Write([]byte("1234567890"))
		f.Seek(0, io.SeekStart)
		defer func() {
			f.Close()
			os.Remove(f.Name())
		}()
		file.File = f
		file.Seeker = f
		c := NewChunkGroup(file, 5, &backoff.ConstantBackoff{Max: 2}, false)
		count := 0
		a.True(c.Next())
		a.NoError(c.Process(func(c *ChunkGroup, chunk io.Reader) error {
			count++
			res, err := io.ReadAll(chunk)
			a.NoError(err)
			a.EqualValues("12345", string(res))
			return nil
		}))
		a.True(c.Next())
		a.Error(c.Process(func(c *ChunkGroup, chunk io.Reader) error {
			count++
			return errors.New("error")
		}))
		a.False(c.Next())
		a.Equal(4, count)
	}
}