github.com/matrixorigin/matrixone@v1.2.0/pkg/fileservice/file_service_test.go (about)

     1  // Copyright 2022 Matrix Origin
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //      http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package fileservice
    16  
    17  import (
    18  	"bytes"
    19  	"context"
    20  	"crypto/rand"
    21  	"encoding/csv"
    22  	"errors"
    23  	"fmt"
    24  	"io"
    25  	mrand "math/rand"
    26  	"path"
    27  	"sort"
    28  	"strconv"
    29  	"strings"
    30  	"sync"
    31  	"sync/atomic"
    32  	"testing"
    33  	"testing/iotest"
    34  	"time"
    35  
    36  	"github.com/matrixorigin/matrixone/pkg/common/moerr"
    37  	"github.com/matrixorigin/matrixone/pkg/fileservice/memorycache"
    38  	"github.com/matrixorigin/matrixone/pkg/pb/api"
    39  	"github.com/matrixorigin/matrixone/pkg/perfcounter"
    40  	"github.com/stretchr/testify/assert"
    41  )
    42  
// testFileService runs a conformance suite against a FileService
// implementation: write/read round-trips through every IOEntry variant
// (Data, ReaderForWrite, WriterForRead, ReadCloserForRead, ToCacheData),
// listing, deletion, error codes, service-name-qualified paths, streaming
// writes, and context cancellation.
//
// newFS must return a fresh service whose Name contains the given name;
// policy is attached to every IOVector the suite issues so the same suite
// can exercise different cache policies.
func testFileService(
	t *testing.T,
	policy Policy,
	newFS func(name string) FileService,
) {

	// Timestamped name so repeated runs against shared backends don't collide.
	fsName := time.Now().Format("fs-2006-01-02-15-04-05")

	// basic: mixed-entry write, multi-entry read (including overlapping
	// ranges, Size -1 for "rest of file"), stat, and sub-path write.
	t.Run("basic", func(t *testing.T) {
		ctx := context.Background()
		fs := newFS(fsName)

		assert.True(t, strings.Contains(fs.Name(), fsName))

		entries, err := fs.List(ctx, "")
		assert.Nil(t, err)
		assert.Equal(t, 0, len(entries))

		// write "123456789ab" as three entries, the last via a reader
		err = fs.Write(ctx, IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset: 0,
					Size:   4,
					Data:   []byte("1234"),
				},
				{
					Offset: 4,
					Size:   4,
					Data:   []byte("5678"),
				},
				{
					Offset:         8,
					Size:           3,
					ReaderForWrite: bytes.NewReader([]byte("9ab")),
				},
			},
			Policy: policy,
		})
		assert.Nil(t, err)

		entries, err = fs.List(ctx, "")
		assert.Nil(t, err)
		assert.Equal(t, 1, len(entries))

		// one read vector exercising every read-side entry form at once
		buf1 := new(bytes.Buffer)
		var r io.ReadCloser
		buf2 := make([]byte, 4)
		vec := IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				0: {
					Offset: 2,
					Size:   2,
				},
				1: {
					Offset: 2,
					Size:   4,
					Data:   buf2, // read into caller-provided buffer
				},
				2: {
					Offset: 7,
					Size:   1,
				},
				3: {
					Offset: 0,
					Size:   1,
				},
				4: {
					Offset:            0,
					Size:              7,
					ReadCloserForRead: &r,
				},
				5: {
					Offset:        4,
					Size:          2,
					WriterForRead: buf1,
				},
				6: {
					Offset: 0,
					Size:   -1, // -1 means read to end of file
				},
			},
			Policy: policy,
		}
		err = fs.Read(ctx, &vec)
		assert.Nil(t, err)
		assert.Equal(t, []byte("34"), vec.Entries[0].Data)
		assert.Equal(t, []byte("3456"), vec.Entries[1].Data)
		assert.Equal(t, []byte("3456"), buf2)
		assert.Equal(t, []byte("8"), vec.Entries[2].Data)
		assert.Equal(t, []byte("1"), vec.Entries[3].Data)
		content, err := io.ReadAll(r)
		assert.Nil(t, err)
		assert.Nil(t, r.Close())
		assert.Equal(t, []byte("1234567"), content)
		assert.Equal(t, []byte("56"), buf1.Bytes())
		assert.Equal(t, []byte("123456789ab"), vec.Entries[6].Data)

		// stat
		entry, err := fs.StatFile(ctx, "foo")
		assert.Nil(t, err)
		assert.Equal(t, "foo", entry.Name)
		assert.Equal(t, false, entry.IsDir)
		assert.Equal(t, int64(11), entry.Size)

		// read from non-zero offset
		vec = IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset: 7,
					Size:   1,
				},
			},
			Policy: policy,
		}
		err = fs.Read(ctx, &vec)
		assert.Nil(t, err)
		assert.Equal(t, []byte("8"), vec.Entries[0].Data)

		// sub path
		err = fs.Write(ctx, IOVector{
			FilePath: "sub/sub2/sub3",
			Entries: []IOEntry{
				{
					Offset: 0,
					Size:   1,
					Data:   []byte("1"),
				},
			},
			Policy: policy,
		})
		assert.Nil(t, err)

	})

	// WriterForRead: reads stream into an io.Writer, with explicit and -1 size.
	t.Run("WriterForRead", func(t *testing.T) {
		fs := newFS(fsName)
		ctx := context.Background()

		err := fs.Write(ctx, IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset: 0,
					Size:   4,
					Data:   []byte("1234"),
				},
			},
			Policy: policy,
		})
		assert.Nil(t, err)

		buf := new(bytes.Buffer)
		vec := &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset:        0,
					Size:          4,
					WriterForRead: buf,
				},
			},
			Policy: policy,
		}
		err = fs.Read(ctx, vec)
		assert.Nil(t, err)
		assert.Equal(t, []byte("1234"), buf.Bytes())

		// Size -1: stream everything from Offset to EOF
		buf = new(bytes.Buffer)
		vec = &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset:        0,
					Size:          -1,
					WriterForRead: buf,
				},
			},
			Policy: policy,
		}
		err = fs.Read(ctx, vec)
		assert.Nil(t, err)
		assert.Equal(t, []byte("1234"), buf.Bytes())

	})

	// ReadCloserForRead: reads surfaced as a caller-owned ReadCloser,
	// for full, truncated, and offset ranges.
	t.Run("ReadCloserForRead", func(t *testing.T) {
		fs := newFS(fsName)
		ctx := context.Background()
		err := fs.Write(ctx, IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset: 0,
					Size:   4,
					Data:   []byte("1234"),
				},
			},
			Policy: policy,
		})
		assert.Nil(t, err)

		var r io.ReadCloser

		vec := &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset:            0,
					Size:              4,
					ReadCloserForRead: &r,
				},
			},
			Policy: policy,
		}
		err = fs.Read(ctx, vec)
		assert.Nil(t, err)
		data, err := io.ReadAll(r)
		assert.Nil(t, err)
		assert.Equal(t, []byte("1234"), data)
		err = r.Close()
		assert.Nil(t, err)

		vec = &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset:            0,
					Size:              3,
					ReadCloserForRead: &r,
				},
			},
			Policy: policy,
		}
		err = fs.Read(ctx, vec)
		assert.Nil(t, err)
		data, err = io.ReadAll(r)
		assert.Nil(t, err)
		assert.Equal(t, []byte("123"), data)
		err = r.Close()
		assert.Nil(t, err)

		vec = &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset:            1,
					Size:              3,
					ReadCloserForRead: &r,
				},
			},
			Policy: policy,
		}
		err = fs.Read(ctx, vec)
		assert.Nil(t, err)
		data, err = io.ReadAll(r)
		assert.Nil(t, err)
		assert.Equal(t, []byte("234"), data)
		err = r.Close()
		assert.Nil(t, err)

	})

	// random: fuzz-style round-trips with randomly split write/read vectors,
	// plus concurrent ReadCloserForRead consumption and a List check.
	t.Run("random", func(t *testing.T) {
		fs := newFS(fsName)
		ctx := context.Background()

		for i := 0; i < 8; i++ {
			filePath := fmt.Sprintf("%d", mrand.Int63())

			// random content
			content := make([]byte, _BlockContentSize*4)
			_, err := rand.Read(content)
			assert.Nil(t, err)
			parts := randomSplit(content, 32)

			// write
			writeVector := IOVector{
				FilePath: filePath,
				Policy:   policy,
			}
			offset := int64(0)
			for _, part := range parts {
				writeVector.Entries = append(writeVector.Entries, IOEntry{
					Offset: offset,
					Size:   int64(len(part)),
					Data:   part,
				})
				offset += int64(len(part))
			}
			err = fs.Write(ctx, writeVector)
			assert.Nil(t, err)

			// read, align to write vector
			readVector := &IOVector{
				FilePath: filePath,
				Policy:   policy,
			}
			for _, entry := range writeVector.Entries {
				readVector.Entries = append(readVector.Entries, IOEntry{
					Offset: entry.Offset,
					Size:   entry.Size,
				})
			}
			err = fs.Read(ctx, readVector)
			assert.Nil(t, err)
			for i, entry := range readVector.Entries {
				assert.Equal(t, parts[i], entry.Data, "part %d, got %+v", i, entry)
			}

			// read, random entry
			parts = randomSplit(content, 16)
			readVector.Entries = readVector.Entries[:0]
			offset = int64(0)
			for _, part := range parts {
				readVector.Entries = append(readVector.Entries, IOEntry{
					Offset: offset,
					Size:   int64(len(part)),
				})
				offset += int64(len(part))
			}
			err = fs.Read(ctx, readVector)
			assert.Nil(t, err)
			for i, entry := range readVector.Entries {
				assert.Equal(t, parts[i], entry.Data, "path: %s, entry: %+v, content %v", filePath, entry, content)
			}

			// read, random entry with ReadCloserForRead
			parts = randomSplit(content, len(content)/10)
			readVector.Entries = readVector.Entries[:0]
			offset = int64(0)
			readers := make([]io.ReadCloser, len(parts))
			for i, part := range parts {
				readVector.Entries = append(readVector.Entries, IOEntry{
					Offset:            offset,
					Size:              int64(len(part)),
					ReadCloserForRead: &readers[i],
				})
				offset += int64(len(part))
			}
			err = fs.Read(ctx, readVector)
			assert.Nil(t, err)
			// drain each reader concurrently; first mismatch wins the errCh slot
			wg := new(sync.WaitGroup)
			errCh := make(chan error, 1)
			numDone := int64(0)
			for i, entry := range readVector.Entries {
				wg.Add(1)
				i := i
				entry := entry
				go func() {
					defer wg.Done()
					reader := readers[i]
					data, err := io.ReadAll(reader)
					assert.Nil(t, err)
					reader.Close()
					if !bytes.Equal(parts[i], data) {
						select {
						case errCh <- moerr.NewInternalError(context.Background(),
							"not equal: path: %s, entry: %+v, content %v",
							filePath, entry, content,
						):
						default:
						}
					}
					atomic.AddInt64(&numDone, 1)
				}()
			}
			wg.Wait()
			if int(numDone) != len(parts) {
				t.Fatal()
			}
			select {
			case err := <-errCh:
				t.Fatal(err)
			default:
			}

			// list
			entries, err := fs.List(ctx, "/")
			assert.Nil(t, err)
			for _, entry := range entries {
				if entry.Name != filePath {
					continue
				}
				assert.Equal(t, filePath, entry.Name)
				assert.Equal(t, false, entry.IsDir)
				assert.Equal(t, int64(len(content)), entry.Size)
			}

		}
	})

	// tree: directory hierarchy — List at every path spelling
	// (bare, / prefix/suffix, service-name-qualified) and Delete idempotence.
	t.Run("tree", func(t *testing.T) {
		fs := newFS(fsName)
		ctx := context.Background()

		for _, dir := range []string{
			"",
			"foo",
			"bar",
			"qux/quux",
		} {
			for i := int64(0); i < 8; i++ {
				err := fs.Write(ctx, IOVector{
					FilePath: path.Join(dir, fmt.Sprintf("%d", i)),
					Entries: []IOEntry{
						{
							Size: i,
							Data: []byte(strings.Repeat(fmt.Sprintf("%d", i), int(i))),
						},
					},
					Policy: policy,
				})
				assert.Nil(t, err)
			}
		}

		// top level: 8 files + 3 directories; sort dirs first, then by name
		entries, err := fs.List(ctx, "")
		assert.Nil(t, err)
		assert.Equal(t, len(entries), 11)
		sort.Slice(entries, func(i, j int) bool {
			a := entries[i]
			b := entries[j]
			if a.IsDir && !b.IsDir {
				return true
			} else if !a.IsDir && b.IsDir {
				return false
			}
			return a.Name < b.Name
		})
		assert.Equal(t, entries[0].IsDir, true)
		assert.Equal(t, entries[0].Name, "bar")
		assert.Equal(t, entries[1].IsDir, true)
		assert.Equal(t, entries[1].Name, "foo")
		assert.Equal(t, entries[2].IsDir, true)
		assert.Equal(t, entries[2].Name, "qux")
		assert.Equal(t, entries[3].IsDir, false)
		assert.Equal(t, entries[3].Name, "0")
		assert.Equal(t, entries[3].Size, int64(0))
		assert.Equal(t, entries[10].IsDir, false)
		assert.Equal(t, entries[10].Name, "7")
		if _, ok := fs.(ETLFileService); ok {
			assert.Equal(t, entries[10].Size, int64(7))
		}

		entries, err = fs.List(ctx, "abc")
		assert.Nil(t, err)
		assert.Equal(t, len(entries), 0)

		entries, err = fs.List(ctx, "foo")
		assert.Nil(t, err)
		assert.Equal(t, len(entries), 8)
		assert.Equal(t, entries[0].IsDir, false)
		assert.Equal(t, entries[0].Name, "0")
		assert.Equal(t, entries[7].IsDir, false)
		assert.Equal(t, entries[7].Name, "7")

		entries, err = fs.List(ctx, "qux/quux")
		assert.Nil(t, err)
		assert.Equal(t, len(entries), 8)
		assert.Equal(t, entries[0].IsDir, false)
		assert.Equal(t, entries[0].Name, "0")
		assert.Equal(t, entries[7].IsDir, false)
		assert.Equal(t, entries[7].Name, "7")

		// with / suffix
		entries, err = fs.List(ctx, "qux/quux/")
		assert.Nil(t, err)
		assert.Equal(t, len(entries), 8)
		assert.Equal(t, entries[0].IsDir, false)
		assert.Equal(t, entries[0].Name, "0")
		assert.Equal(t, entries[7].IsDir, false)
		assert.Equal(t, entries[7].Name, "7")

		// with / prefix
		entries, err = fs.List(ctx, "/qux/quux/")
		assert.Nil(t, err)
		assert.Equal(t, len(entries), 8)
		assert.Equal(t, entries[0].IsDir, false)
		assert.Equal(t, entries[0].Name, "0")
		assert.Equal(t, entries[7].IsDir, false)
		assert.Equal(t, entries[7].Name, "7")

		// with fs name
		entries, err = fs.List(ctx, JoinPath(fs.Name(), "qux/quux/"))
		assert.Nil(t, err)
		assert.Equal(t, len(entries), 8)
		assert.Equal(t, entries[0].IsDir, false)
		assert.Equal(t, entries[0].Name, "0")
		assert.Equal(t, entries[7].IsDir, false)
		assert.Equal(t, entries[7].Name, "7")

		// with fs name and / prefix and suffix
		entries, err = fs.List(ctx, JoinPath(fs.Name(), "/qux/quux/"))
		assert.Nil(t, err)
		assert.Equal(t, len(entries), 8)
		assert.Equal(t, entries[0].IsDir, false)
		assert.Equal(t, entries[0].Name, "0")
		assert.Equal(t, entries[7].IsDir, false)
		assert.Equal(t, entries[7].Name, "7")

		for _, entry := range entries {
			err := fs.Delete(ctx, path.Join("qux/quux", entry.Name))
			assert.Nil(t, err)
			// delete again
			err = fs.Delete(ctx, path.Join("qux/quux", entry.Name))
			assert.Nil(t, err)
		}
		entries, err = fs.List(ctx, "qux/quux")
		assert.Nil(t, err)
		assert.Equal(t, 0, len(entries))

	})

	// errors: every moerr code the contract promises for bad vectors,
	// duplicate writes, short files, size mismatches, and invalid paths.
	t.Run("errors", func(t *testing.T) {
		fs := newFS(fsName)
		ctx := context.Background()

		err := fs.Read(ctx, &IOVector{
			FilePath: "foo",
			Policy:   policy,
		})
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrEmptyVector))

		err = fs.Read(ctx, &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Size: -1,
				},
			},
			Policy: policy,
		})
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrFileNotFound))

		err = fs.Write(ctx, IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Size: 2,
					Data: []byte("ab"),
				},
			},
			Policy: policy,
		})
		assert.Nil(t, err)
		err = fs.Write(ctx, IOVector{
			FilePath: "foo",
			Policy:   policy,
		})
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrFileAlreadyExists))

		// reading past EOF of the 2-byte file
		err = fs.Read(ctx, &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset: 0,
					Size:   3,
				},
			},
			Policy: policy,
		})
		assert.True(t, moerr.IsMoErrCode(moerr.ConvertGoError(ctx, err), moerr.ErrUnexpectedEOF))

		err = fs.Read(ctx, &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset: 1,
					Size:   0,
				},
			},
			Policy: policy,
		})
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrEmptyRange))

		err = fs.Write(ctx, IOVector{
			FilePath: "bar",
			Entries: []IOEntry{
				{
					Size: 1,
				},
			},
			Policy: policy,
		})
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrSizeNotMatch))

		err = fs.Write(ctx, IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					ReaderForWrite: iotest.ErrReader(io.ErrNoProgress),
				},
			},
			Policy: policy,
		})
		// fs leaking io error, but I don't know what this test really tests.
		// assert.True(t, err == io.ErrNoProgress)
		// assert.True(t, moerr.IsMoErrCode(moerr.ConvertGoError(err), moerr.ErrInternal))
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrFileAlreadyExists))

		// '#' is not allowed in paths
		vector := IOVector{
			FilePath: JoinPath(fsName, "a#b#c"),
			Entries: []IOEntry{
				{Size: 1, Data: []byte("a")},
			},
			Policy: policy,
		}
		err = fs.Write(ctx, vector)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrInvalidPath))
		err = fs.Read(ctx, &vector)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrInvalidPath))
		_, err = fs.List(ctx, vector.FilePath)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrInvalidPath))
		err = fs.Delete(ctx, vector.FilePath)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrInvalidPath))
	})

	// cache data: ToCacheData population, CachedData round-trip, ReadCache.
	t.Run("cache data", func(t *testing.T) {
		fs := newFS(fsName)
		ctx := context.Background()
		var counterSet perfcounter.CounterSet
		ctx = perfcounter.WithCounterSet(ctx, &counterSet)

		m := api.Int64Map{
			M: map[int64]int64{
				42: 42,
			},
		}
		data, err := m.Marshal()
		assert.Nil(t, err)
		err = fs.Write(ctx, IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Size: int64(len(data)),
					Data: data,
				},
			},
			Policy: policy,
		})
		assert.Nil(t, err)

		// read with ToCacheData
		vec := &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Size: int64(len(data)),
					ToCacheData: func(r io.Reader, data []byte, allocator CacheDataAllocator) (memorycache.CacheData, error) {
						bs, err := io.ReadAll(r)
						assert.Nil(t, err)
						if len(data) > 0 {
							assert.Equal(t, bs, data)
						}
						cacheData := allocator.Alloc(len(bs))
						copy(cacheData.Bytes(), bs)
						return cacheData, nil
					},
				},
			},
			Policy: policy,
		}
		err = fs.Read(ctx, vec)
		assert.Nil(t, err)

		cachedData := vec.Entries[0].CachedData
		assert.NotNil(t, cachedData)
		assert.Equal(t, data, cachedData.Bytes())

		err = m.Unmarshal(vec.Entries[0].CachedData.Bytes())
		assert.NoError(t, err)
		assert.Equal(t, 1, len(m.M))
		assert.Equal(t, int64(42), m.M[42])

		vec.Release()

		// ReadCache: cache hit is optional, but a hit must return the bytes
		vec = &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Size: int64(len(data)),
				},
			},
			Policy: policy,
		}
		err = fs.ReadCache(ctx, vec)
		assert.Nil(t, err)
		if vec.Entries[0].CachedData != nil {
			assert.Equal(t, data, vec.Entries[0].CachedData.Bytes())
		}
		vec.Release()
		fs.Close()
	})

	// ignore: entries already marked done are skipped by Read.
	t.Run("ignore", func(t *testing.T) {
		fs := newFS(fsName)
		ctx := context.Background()

		data := []byte("foo")
		err := fs.Write(ctx, IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Size: int64(len(data)),
					Data: data,
				},
			},
			Policy: policy,
		})
		assert.Nil(t, err)

		vec := &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Size: int64(len(data)),
					done: true,
				},
				{
					Size: int64(len(data)),
				},
			},
			Policy: policy,
		}
		err = fs.Read(ctx, vec)
		assert.Nil(t, err)

		assert.Nil(t, vec.Entries[0].Data)
		assert.Equal(t, []byte("foo"), vec.Entries[1].Data)

	})

	// named path: service-name-qualified paths are accepted
	// case-insensitively; unknown service names are rejected.
	t.Run("named path", func(t *testing.T) {
		ctx := context.Background()
		fs := newFS(fsName)

		// write
		err := fs.Write(ctx, IOVector{
			FilePath: JoinPath(fs.Name(), "foo"),
			Entries: []IOEntry{
				{
					Size: 4,
					Data: []byte("1234"),
				},
			},
			Policy: policy,
		})
		assert.Nil(t, err)

		// read
		vec := IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Size: -1,
				},
			},
			Policy: policy,
		}
		err = fs.Read(ctx, &vec)
		assert.Nil(t, err)
		assert.Equal(t, []byte("1234"), vec.Entries[0].Data)

		// read with lower named path
		vec = IOVector{
			FilePath: JoinPath(strings.ToLower(fs.Name()), "foo"),
			Entries: []IOEntry{
				{
					Size: -1,
				},
			},
			Policy: policy,
		}
		err = fs.Read(ctx, &vec)
		assert.Nil(t, err)
		assert.Equal(t, []byte("1234"), vec.Entries[0].Data)

		// read with upper named path
		vec = IOVector{
			FilePath: JoinPath(strings.ToUpper(fs.Name()), "foo"),
			Entries: []IOEntry{
				{
					Size: -1,
				},
			},
			Policy: policy,
		}
		err = fs.Read(ctx, &vec)
		assert.Nil(t, err)
		assert.Equal(t, []byte("1234"), vec.Entries[0].Data)

		// bad name
		vec.FilePath = JoinPath(fs.Name()+"abc", "foo")
		err = fs.Read(ctx, &vec)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNoService) || moerr.IsMoErrCode(err, moerr.ErrWrongService))
		err = fs.Write(ctx, vec)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNoService) || moerr.IsMoErrCode(err, moerr.ErrWrongService))
		err = fs.Delete(ctx, vec.FilePath)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNoService) || moerr.IsMoErrCode(err, moerr.ErrWrongService))
	})

	// issue6110: List on a qualified sub-path returns the next path segment.
	t.Run("issue6110", func(t *testing.T) {
		ctx := context.Background()
		fs := newFS(fsName)
		err := fs.Write(ctx, IOVector{
			FilePath: "path/to/file/foo",
			Entries: []IOEntry{
				{
					Offset: 0,
					Size:   4,
					Data:   []byte("1234"),
				},
			},
			Policy: policy,
		})
		assert.Nil(t, err)
		entries, err := fs.List(ctx, JoinPath(fs.Name(), "/path"))
		assert.Nil(t, err)
		assert.Equal(t, 1, len(entries))
		assert.Equal(t, "to", entries[0].Name)
	})

	// streaming write: unknown-size write through a pipe, duplicate-write
	// rejection, and cancellation of an in-flight streaming write.
	t.Run("streaming write", func(t *testing.T) {
		ctx := context.Background()
		fs := newFS(fsName)

		reader, writer := io.Pipe()
		n := 65536
		defer reader.Close()
		defer writer.Close()

		// producer goroutine feeds CSV rows into the pipe
		go func() {
			csvWriter := csv.NewWriter(writer)
			for i := 0; i < n; i++ {
				err := csvWriter.Write([]string{"foo", strconv.Itoa(i)})
				if err != nil {
					writer.CloseWithError(err)
					return
				}
			}
			csvWriter.Flush()
			if err := csvWriter.Error(); err != nil {
				writer.CloseWithError(err)
				return
			}
			writer.Close()
		}()

		filePath := "foo"
		vec := IOVector{
			FilePath: filePath,
			Entries: []IOEntry{
				{
					ReaderForWrite: reader,
					Size:           -1, // must set to -1
				},
			},
			Policy: policy,
		}

		// write
		err := fs.Write(ctx, vec)
		assert.Nil(t, err)

		// read
		vec = IOVector{
			FilePath: filePath,
			Entries: []IOEntry{
				{
					Size: -1,
				},
			},
			Policy: policy,
		}
		err = fs.Read(ctx, &vec)
		assert.Nil(t, err)

		// validate: regenerate the same CSV and compare bytes
		buf := new(bytes.Buffer)
		csvWriter := csv.NewWriter(buf)
		for i := 0; i < n; i++ {
			err := csvWriter.Write([]string{"foo", strconv.Itoa(i)})
			assert.Nil(t, err)
		}
		csvWriter.Flush()
		err = csvWriter.Error()
		assert.Nil(t, err)
		assert.Equal(t, buf.Bytes(), vec.Entries[0].Data)

		// write to existed
		vec = IOVector{
			FilePath: filePath,
			Entries: []IOEntry{
				{
					ReaderForWrite: bytes.NewReader([]byte("abc")),
					Size:           -1,
				},
			},
			Policy: policy,
		}
		err = fs.Write(ctx, vec)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrFileAlreadyExists))

		// cancel write
		reader, writer = io.Pipe()
		defer reader.Close()
		defer writer.Close()
		vec = IOVector{
			FilePath: "bar",
			Entries: []IOEntry{
				{
					ReaderForWrite: reader,
					Size:           -1,
				},
			},
			Policy: policy,
		}
		ctx, cancel := context.WithCancel(context.Background())
		cancel()
		errCh := make(chan error)
		go func() {
			err := fs.Write(ctx, vec)
			errCh <- err
		}()
		select {
		case err := <-errCh:
			assert.True(t, errors.Is(err, context.Canceled))
		case <-time.After(time.Second * 10):
			t.Fatal("should cancel")
		}

	})

	// context cancel: every operation must observe an already-canceled ctx.
	t.Run("context cancel", func(t *testing.T) {
		fs := newFS(fsName)
		ctx, cancel := context.WithCancel(context.Background())
		cancel()

		err := fs.Write(ctx, IOVector{
			Policy: policy,
		})
		assert.ErrorIs(t, err, context.Canceled)

		err = fs.Read(ctx, &IOVector{
			Policy: policy,
		})
		assert.ErrorIs(t, err, context.Canceled)

		_, err = fs.List(ctx, "")
		assert.ErrorIs(t, err, context.Canceled)

		err = fs.Delete(ctx, "")
		assert.ErrorIs(t, err, context.Canceled)
	})

}
  1003  
  1004  func randomSplit(data []byte, maxLen int) (ret [][]byte) {
  1005  	for {
  1006  		if len(data) == 0 {
  1007  			return
  1008  		}
  1009  		if len(data) < maxLen {
  1010  			ret = append(ret, data)
  1011  			return
  1012  		}
  1013  		cut := 1 + mrand.Intn(maxLen)
  1014  		ret = append(ret, data[:cut])
  1015  		data = data[cut:]
  1016  	}
  1017  }
  1018  
  1019  func fixedSplit(data []byte, l int) (ret [][]byte) {
  1020  	for {
  1021  		if len(data) == 0 {
  1022  			return
  1023  		}
  1024  		if len(data) < l {
  1025  			ret = append(ret, data)
  1026  			return
  1027  		}
  1028  		ret = append(ret, data[:l])
  1029  		data = data[l:]
  1030  	}
  1031  }