github.com/matrixorigin/matrixone@v0.7.0/pkg/fileservice/file_service_test.go

// Copyright 2022 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileservice

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/csv"
	"encoding/gob"
	"errors"
	"fmt"
	"io"
	mrand "math/rand"
	"path"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"testing/iotest"
	"time"

	"github.com/matrixorigin/matrixone/pkg/common/moerr"
	"github.com/stretchr/testify/assert"
)

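// testFileService runs a shared suite of subtests against the FileService
// instances produced by newFS, so every implementation is checked against
// the same expected behaviors.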
func testFileService(
	t *testing.T,
	newFS func(name string) FileService,
) {

	fsName := time.Now().Format("fs-2006-01-02-15-04-05")

	t.Run("basic", func(t *testing.T) {
		ctx := context.Background()
		fs := newFS(fsName)

		assert.Equal(t, fsName, fs.Name())

		entries, err := fs.List(ctx, "")
		assert.Nil(t, err)
		assert.Equal(t, 0, len(entries))

		err = fs.Write(ctx, IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset: 0,
					Size:   4,
					Data:   []byte("1234"),
				},
				{
					Offset: 4,
					Size:   4,
					Data:   []byte("5678"),
				},
				{
					Offset:         8,
					Size:           3,
					ReaderForWrite: bytes.NewReader([]byte("9ab")),
				},
			},
		})
		assert.Nil(t, err)

		entries, err = fs.List(ctx, "")
		assert.Nil(t, err)
		assert.Equal(t, 1, len(entries))

		buf1 := new(bytes.Buffer)
		var r io.ReadCloser
		buf2 := make([]byte, 4)
		vec := IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				0: {
					Offset: 2,
					Size:   2,
				},
				1: {
					Offset: 2,
					Size:   4,
					Data:   buf2,
				},
				2: {
					Offset: 7,
					Size:   1,
				},
				3: {
					Offset: 0,
					Size:   1,
				},
				4: {
					Offset:            0,
					Size:              7,
					ReadCloserForRead: &r,
				},
				5: {
					Offset:        4,
					Size:          2,
					WriterForRead: buf1,
				},
				6: {
					Offset: 0,
					Size:   -1,
				},
			},
		}
		err = fs.Read(ctx, &vec)
		assert.Nil(t, err)
		assert.Equal(t, []byte("34"), vec.Entries[0].Data)
		assert.Equal(t, []byte("3456"), vec.Entries[1].Data)
		assert.Equal(t, []byte("3456"), buf2)
		assert.Equal(t, []byte("8"), vec.Entries[2].Data)
		assert.Equal(t, []byte("1"), vec.Entries[3].Data)
		content, err := io.ReadAll(r)
		assert.Nil(t, err)
		assert.Nil(t, r.Close())
		assert.Equal(t, []byte("1234567"), content)
		assert.Equal(t, []byte("56"), buf1.Bytes())
		assert.Equal(t, []byte("123456789ab"), vec.Entries[6].Data)

		// stat
		entry, err := fs.StatFile(ctx, "foo")
		assert.Nil(t, err)
		assert.Equal(t, "foo", entry.Name)
		assert.Equal(t, false, entry.IsDir)
		assert.Equal(t, int64(11), entry.Size)

		// read from non-zero offset
		vec = IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset: 7,
					Size:   1,
				},
			},
		}
		err = fs.Read(ctx, &vec)
		assert.Nil(t, err)
		assert.Equal(t, []byte("8"), vec.Entries[0].Data)

		// sub path
		err = fs.Write(ctx, IOVector{
			FilePath: "sub/sub2/sub3",
			Entries: []IOEntry{
				{
					Offset: 0,
					Size:   1,
					Data:   []byte("1"),
				},
			},
		})
		assert.Nil(t, err)

	})

	t.Run("WriterForRead", func(t *testing.T) {
		fs := newFS(fsName)
		ctx := context.Background()

		err := fs.Write(ctx, IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset: 0,
					Size:   4,
					Data:   []byte("1234"),
				},
			},
		})
		assert.Nil(t, err)

		buf := new(bytes.Buffer)
		vec := &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset:        0,
					Size:          4,
					WriterForRead: buf,
				},
			},
		}
		err = fs.Read(ctx, vec)
		assert.Nil(t, err)
		assert.Equal(t, []byte("1234"), buf.Bytes())

		buf = new(bytes.Buffer)
		vec = &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset:        0,
					Size:          -1,
					WriterForRead: buf,
				},
			},
		}
		err = fs.Read(ctx, vec)
		assert.Nil(t, err)
		assert.Equal(t, []byte("1234"), buf.Bytes())

	})

	t.Run("ReadCloserForRead", func(t *testing.T) {
		fs := newFS(fsName)
		ctx := context.Background()
		err := fs.Write(ctx, IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset: 0,
					Size:   4,
					Data:   []byte("1234"),
				},
			},
		})
		assert.Nil(t, err)

		var r io.ReadCloser
		vec := &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset:            0,
					Size:              4,
					ReadCloserForRead: &r,
				},
			},
		}
		err = fs.Read(ctx, vec)
		assert.Nil(t, err)
		data, err := io.ReadAll(r)
		assert.Nil(t, err)
		assert.Equal(t, []byte("1234"), data)
		err = r.Close()
		assert.Nil(t, err)

		vec = &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset:            1,
					Size:              3,
					ReadCloserForRead: &r,
				},
			},
		}
		err = fs.Read(ctx, vec)
		assert.Nil(t, err)
		data, err = io.ReadAll(r)
		assert.Nil(t, err)
		assert.Equal(t, []byte("234"), data)
		err = r.Close()
		assert.Nil(t, err)

	})

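	// write random content split into randomly sized entries, then read it
	// back through several different entry layouts and check the bytes match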
	t.Run("random", func(t *testing.T) {
		fs := newFS(fsName)
		ctx := context.Background()

		for i := 0; i < 8; i++ {
			filePath := fmt.Sprintf("%d", mrand.Int63())

			// random content
			content := make([]byte, _BlockContentSize*4)
			_, err := rand.Read(content)
			assert.Nil(t, err)
			parts := randomSplit(content, 32)

			// write
			writeVector := IOVector{
				FilePath: filePath,
			}
			offset := int64(0)
			for _, part := range parts {
				writeVector.Entries = append(writeVector.Entries, IOEntry{
					Offset: offset,
					Size:   int64(len(part)),
					Data:   part,
				})
				offset += int64(len(part))
			}
			err = fs.Write(ctx, writeVector)
			assert.Nil(t, err)

			// read, align to write vector
			readVector := &IOVector{
				FilePath: filePath,
			}
			for _, entry := range writeVector.Entries {
				readVector.Entries = append(readVector.Entries, IOEntry{
					Offset: entry.Offset,
					Size:   entry.Size,
				})
			}
			err = fs.Read(ctx, readVector)
			assert.Nil(t, err)
			for i, entry := range readVector.Entries {
				assert.Equal(t, parts[i], entry.Data, "part %d, got %+v", i, entry)
			}

			// read, random entry
			parts = randomSplit(content, 16)
			readVector.Entries = readVector.Entries[:0]
			offset = int64(0)
			for _, part := range parts {
				readVector.Entries = append(readVector.Entries, IOEntry{
					Offset: offset,
					Size:   int64(len(part)),
				})
				offset += int64(len(part))
			}
			err = fs.Read(ctx, readVector)
			assert.Nil(t, err)
			for i, entry := range readVector.Entries {
				assert.Equal(t, parts[i], entry.Data, "path: %s, entry: %+v, content %v", filePath, entry, content)
			}

			// read, random entry with ReadCloserForRead
			parts = randomSplit(content, len(content)/10)
			readVector.Entries = readVector.Entries[:0]
			offset = int64(0)
			readers := make([]io.ReadCloser, len(parts))
			for i, part := range parts {
				readVector.Entries = append(readVector.Entries, IOEntry{
					Offset:            offset,
					Size:              int64(len(part)),
					ReadCloserForRead: &readers[i],
				})
				offset += int64(len(part))
			}
			err = fs.Read(ctx, readVector)
			assert.Nil(t, err)
			wg := new(sync.WaitGroup)
			errCh := make(chan error, 1)
			numDone := int64(0)
			for i, entry := range readVector.Entries {
				wg.Add(1)
				i := i
				entry := entry
				go func() {
					defer wg.Done()
					reader := readers[i]
					data, err := io.ReadAll(reader)
					assert.Nil(t, err)
					reader.Close()
					if !bytes.Equal(parts[i], data) {
						select {
						case errCh <- moerr.NewInternalError(context.Background(),
							"not equal: path: %s, entry: %+v, content %v",
							filePath, entry, content,
						):
						default:
						}
					}
					atomic.AddInt64(&numDone, 1)
				}()
			}
			wg.Wait()
			if int(numDone) != len(parts) {
				t.Fatal()
			}
			select {
			case err := <-errCh:
				t.Fatal(err)
			default:
			}

			// list
			entries, err := fs.List(ctx, "/")
			assert.Nil(t, err)
			for _, entry := range entries {
				if entry.Name != filePath {
					continue
				}
				assert.Equal(t, filePath, entry.Name)
				assert.Equal(t, false, entry.IsDir)
				assert.Equal(t, int64(len(content)), entry.Size)
			}

		}
	})

	t.Run("tree", func(t *testing.T) {
		fs := newFS(fsName)
		ctx := context.Background()

		for _, dir := range []string{
			"",
			"foo",
			"bar",
			"qux/quux",
		} {
			for i := int64(0); i < 8; i++ {
				err := fs.Write(ctx, IOVector{
					FilePath: path.Join(dir, fmt.Sprintf("%d", i)),
					Entries: []IOEntry{
						{
							Size: i,
							Data: []byte(strings.Repeat(fmt.Sprintf("%d", i), int(i))),
						},
					},
				})
				assert.Nil(t, err)
			}
		}

		entries, err := fs.List(ctx, "")
		assert.Nil(t, err)
		assert.Equal(t, len(entries), 11)
		sort.Slice(entries, func(i, j int) bool {
			a := entries[i]
			b := entries[j]
			if a.IsDir && !b.IsDir {
				return true
			} else if !a.IsDir && b.IsDir {
				return false
			}
			return a.Name < b.Name
		})
		assert.Equal(t, entries[0].IsDir, true)
		assert.Equal(t, entries[0].Name, "bar")
		assert.Equal(t, entries[1].IsDir, true)
		assert.Equal(t, entries[1].Name, "foo")
		assert.Equal(t, entries[2].IsDir, true)
		assert.Equal(t, entries[2].Name, "qux")
		assert.Equal(t, entries[3].IsDir, false)
		assert.Equal(t, entries[3].Name, "0")
		assert.Equal(t, entries[3].Size, int64(0))
		assert.Equal(t, entries[10].IsDir, false)
		assert.Equal(t, entries[10].Name, "7")
		if _, ok := fs.(ETLFileService); ok {
			assert.Equal(t, entries[10].Size, int64(7))
		}

		entries, err = fs.List(ctx, "abc")
		assert.Nil(t, err)
		assert.Equal(t, len(entries), 0)

		entries, err = fs.List(ctx, "foo")
		assert.Nil(t, err)
		assert.Equal(t, len(entries), 8)
		assert.Equal(t, entries[0].IsDir, false)
		assert.Equal(t, entries[0].Name, "0")
		assert.Equal(t, entries[7].IsDir, false)
		assert.Equal(t, entries[7].Name, "7")

		entries, err = fs.List(ctx, "qux/quux")
		assert.Nil(t, err)
		assert.Equal(t, len(entries), 8)
		assert.Equal(t, entries[0].IsDir, false)
		assert.Equal(t, entries[0].Name, "0")
		assert.Equal(t, entries[7].IsDir, false)
		assert.Equal(t, entries[7].Name, "7")

		// with / suffix
		entries, err = fs.List(ctx, "qux/quux/")
		assert.Nil(t, err)
		assert.Equal(t, len(entries), 8)
		assert.Equal(t, entries[0].IsDir, false)
		assert.Equal(t, entries[0].Name, "0")
		assert.Equal(t, entries[7].IsDir, false)
		assert.Equal(t, entries[7].Name, "7")

		// with / prefix
		entries, err = fs.List(ctx, "/qux/quux/")
		assert.Nil(t, err)
		assert.Equal(t, len(entries), 8)
		assert.Equal(t, entries[0].IsDir, false)
		assert.Equal(t, entries[0].Name, "0")
		assert.Equal(t, entries[7].IsDir, false)
		assert.Equal(t, entries[7].Name, "7")

		// with fs name
		entries, err = fs.List(ctx, JoinPath(fsName, "qux/quux/"))
		assert.Nil(t, err)
		assert.Equal(t, len(entries), 8)
		assert.Equal(t, entries[0].IsDir, false)
		assert.Equal(t, entries[0].Name, "0")
		assert.Equal(t, entries[7].IsDir, false)
		assert.Equal(t, entries[7].Name, "7")

		// with fs name and / prefix and suffix
		entries, err = fs.List(ctx, JoinPath(fsName, "/qux/quux/"))
		assert.Nil(t, err)
		assert.Equal(t, len(entries), 8)
		assert.Equal(t, entries[0].IsDir, false)
		assert.Equal(t, entries[0].Name, "0")
		assert.Equal(t, entries[7].IsDir, false)
		assert.Equal(t, entries[7].Name, "7")

		for _, entry := range entries {
			err := fs.Delete(ctx, path.Join("qux/quux", entry.Name))
			assert.Nil(t, err)
		}
		entries, err = fs.List(ctx, "qux/quux")
		assert.Nil(t, err)
		assert.Equal(t, len(entries), 0)

	})

	t.Run("errors", func(t *testing.T) {
		fs := newFS(fsName)
		ctx := context.Background()

		err := fs.Read(ctx, &IOVector{
			FilePath: "foo",
		})
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrEmptyVector))

		err = fs.Read(ctx, &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Size: -1,
				},
			},
		})
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrFileNotFound))

		err = fs.Write(ctx, IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Size: 2,
					Data: []byte("ab"),
				},
			},
		})
		assert.Nil(t, err)
		err = fs.Write(ctx, IOVector{
			FilePath: "foo",
		})
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrFileAlreadyExists))

		err = fs.Read(ctx, &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset: 0,
					Size:   3,
				},
			},
		})
		assert.True(t, moerr.IsMoErrCode(moerr.ConvertGoError(ctx, err), moerr.ErrUnexpectedEOF))

		err = fs.Read(ctx, &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Offset: 1,
					Size:   0,
				},
			},
		})
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrEmptyRange))

		err = fs.Write(ctx, IOVector{
			FilePath: "bar",
			Entries: []IOEntry{
				{
					Size: 1,
				},
			},
		})
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrSizeNotMatch))

		err = fs.Write(ctx, IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					ReaderForWrite: iotest.ErrReader(io.ErrNoProgress),
				},
			},
		})
		// "foo" already exists, so the write fails with ErrFileAlreadyExists
		// rather than surfacing the reader's io.ErrNoProgress.
		// assert.True(t, err == io.ErrNoProgress)
		// assert.True(t, moerr.IsMoErrCode(moerr.ConvertGoError(err), moerr.ErrInternal))
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrFileAlreadyExists))

		vector := IOVector{
			FilePath: JoinPath(fsName, "a#b#c"),
			Entries: []IOEntry{
				{Size: 1, Data: []byte("a")},
			},
		}
		err = fs.Write(ctx, vector)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrInvalidPath))
		err = fs.Read(ctx, &vector)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrInvalidPath))
		_, err = fs.List(ctx, vector.FilePath)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrInvalidPath))
		err = fs.Delete(ctx, vector.FilePath)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrInvalidPath))
	})

	t.Run("object", func(t *testing.T) {
		fs := newFS(fsName)
		ctx := context.Background()

		buf := new(bytes.Buffer)
		err := gob.NewEncoder(buf).Encode(map[int]int{
			42: 42,
		})
		assert.Nil(t, err)
		data := buf.Bytes()
		err = fs.Write(ctx, IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Size: int64(len(data)),
					Data: data,
				},
			},
		})
		assert.Nil(t, err)

		vec := &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Size: int64(len(data)),
					ToObject: func(r io.Reader, data []byte) (any, int64, error) {
						bs, err := io.ReadAll(r)
						assert.Nil(t, err)
						if len(data) > 0 {
							assert.Equal(t, bs, data)
						}
						var m map[int]int
						if err := gob.NewDecoder(bytes.NewReader(bs)).Decode(&m); err != nil {
							return nil, 0, err
						}
						return m, 1, nil
					},
				},
			},
		}
		err = fs.Read(ctx, vec)
		assert.Nil(t, err)

		m, ok := vec.Entries[0].Object.(map[int]int)
		assert.True(t, ok)
		assert.Equal(t, 1, len(m))
		assert.Equal(t, 42, m[42])
		assert.Equal(t, int64(1), vec.Entries[0].ObjectSize)

	})

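	// an entry whose done flag is already set is expected to be skipped by
	// Read, leaving its Data nil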
	t.Run("ignore", func(t *testing.T) {
		fs := newFS(fsName)
		ctx := context.Background()

		data := []byte("foo")
		err := fs.Write(ctx, IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Size: int64(len(data)),
					Data: data,
				},
			},
		})
		assert.Nil(t, err)

		vec := &IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Size: int64(len(data)),
					done: true,
				},
				{
					Size: int64(len(data)),
				},
			},
		}
		err = fs.Read(ctx, vec)
		assert.Nil(t, err)

		assert.Nil(t, vec.Entries[0].Data)
		assert.Equal(t, []byte("foo"), vec.Entries[1].Data)

	})

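	// FilePath may be prefixed with the service name; the reads below expect
	// the name to be matched case-insensitively and unknown names to fail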
	t.Run("named path", func(t *testing.T) {
		ctx := context.Background()
		fs := newFS(fsName)

		// write
		err := fs.Write(ctx, IOVector{
			FilePath: JoinPath(fs.Name(), "foo"),
			Entries: []IOEntry{
				{
					Size: 4,
					Data: []byte("1234"),
				},
			},
		})
		assert.Nil(t, err)

		// read
		vec := IOVector{
			FilePath: "foo",
			Entries: []IOEntry{
				{
					Size: -1,
				},
			},
		}
		err = fs.Read(ctx, &vec)
		assert.Nil(t, err)
		assert.Equal(t, []byte("1234"), vec.Entries[0].Data)

		// read with lower named path
		vec = IOVector{
			FilePath: JoinPath(strings.ToLower(fs.Name()), "foo"),
			Entries: []IOEntry{
				{
					Size: -1,
				},
			},
		}
		err = fs.Read(ctx, &vec)
		assert.Nil(t, err)
		assert.Equal(t, []byte("1234"), vec.Entries[0].Data)

		// read with upper named path
		vec = IOVector{
			FilePath: JoinPath(strings.ToUpper(fs.Name()), "foo"),
			Entries: []IOEntry{
				{
					Size: -1,
				},
			},
		}
		err = fs.Read(ctx, &vec)
		assert.Nil(t, err)
		assert.Equal(t, []byte("1234"), vec.Entries[0].Data)

		// bad name
		vec.FilePath = JoinPath(fs.Name()+"abc", "foo")
		err = fs.Read(ctx, &vec)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNoService) || moerr.IsMoErrCode(err, moerr.ErrWrongService))
		err = fs.Write(ctx, vec)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNoService) || moerr.IsMoErrCode(err, moerr.ErrWrongService))
		err = fs.Delete(ctx, vec.FilePath)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNoService) || moerr.IsMoErrCode(err, moerr.ErrWrongService))
	})

	t.Run("issue6110", func(t *testing.T) {
		ctx := context.Background()
		fs := newFS(fsName)
		err := fs.Write(ctx, IOVector{
			FilePath: "path/to/file/foo",
			Entries: []IOEntry{
				{
					Offset: 0,
					Size:   4,
					Data:   []byte("1234"),
				},
			},
		})
		assert.Nil(t, err)
		entries, err := fs.List(ctx, JoinPath(fsName, "/path"))
		assert.Nil(t, err)
		assert.Equal(t, 1, len(entries))
		assert.Equal(t, "to", entries[0].Name)
	})

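	// streaming writes supply data through ReaderForWrite with Size set to -1,
	// since the total length is not known up front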
	t.Run("streaming write", func(t *testing.T) {
		ctx := context.Background()
		fs := newFS(fsName)

		reader, writer := io.Pipe()
		n := 65536
		defer reader.Close()
		defer writer.Close()

		go func() {
			csvWriter := csv.NewWriter(writer)
			for i := 0; i < n; i++ {
				err := csvWriter.Write([]string{"foo", strconv.Itoa(i)})
				if err != nil {
					writer.CloseWithError(err)
					return
				}
			}
			csvWriter.Flush()
			if err := csvWriter.Error(); err != nil {
				writer.CloseWithError(err)
				return
			}
			writer.Close()
		}()

		filePath := "foo"
		vec := IOVector{
			FilePath: filePath,
			Entries: []IOEntry{
				{
					ReaderForWrite: reader,
					Size:           -1, // must be set to -1 for streaming writes
				},
			},
		}

		// write
		err := fs.Write(ctx, vec)
		assert.Nil(t, err)

		// read
		vec = IOVector{
			FilePath: filePath,
			Entries: []IOEntry{
				{
					Size: -1,
				},
			},
		}
		err = fs.Read(ctx, &vec)
		assert.Nil(t, err)

		// validate
		buf := new(bytes.Buffer)
		csvWriter := csv.NewWriter(buf)
		for i := 0; i < n; i++ {
			err := csvWriter.Write([]string{"foo", strconv.Itoa(i)})
			assert.Nil(t, err)
		}
		csvWriter.Flush()
		err = csvWriter.Error()
		assert.Nil(t, err)
		assert.Equal(t, buf.Bytes(), vec.Entries[0].Data)

		// write to an existing file
		vec = IOVector{
			FilePath: filePath,
			Entries: []IOEntry{
				{
					ReaderForWrite: bytes.NewReader([]byte("abc")),
					Size:           -1,
				},
			},
		}
		err = fs.Write(ctx, vec)
		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrFileAlreadyExists))

		// cancel write
		reader, writer = io.Pipe()
		defer reader.Close()
		defer writer.Close()
		vec = IOVector{
			FilePath: "bar",
			Entries: []IOEntry{
				{
					ReaderForWrite: reader,
					Size:           -1,
				},
			},
		}
		ctx, cancel := context.WithCancel(context.Background())
		cancel()
		errCh := make(chan error)
		go func() {
			err := fs.Write(ctx, vec)
			errCh <- err
		}()
		select {
		case err := <-errCh:
			assert.True(t, errors.Is(err, context.Canceled))
		case <-time.After(time.Second * 10):
			t.Fatal("should cancel")
		}

	})

	t.Run("context cancel", func(t *testing.T) {
		fs := newFS(fsName)
		ctx, cancel := context.WithCancel(context.Background())
		cancel()

		err := fs.Write(ctx, IOVector{})
		assert.ErrorIs(t, err, context.Canceled)

		err = fs.Read(ctx, &IOVector{})
		assert.ErrorIs(t, err, context.Canceled)

		_, err = fs.List(ctx, "")
		assert.ErrorIs(t, err, context.Canceled)

		err = fs.Delete(ctx, "")
		assert.ErrorIs(t, err, context.Canceled)
	})

}

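// randomSplit splits data into consecutive chunks of random length up to
// maxLen; the final chunk may be shorter.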
func randomSplit(data []byte, maxLen int) (ret [][]byte) {
	for {
		if len(data) == 0 {
			return
		}
		if len(data) < maxLen {
			ret = append(ret, data)
			return
		}
		cut := 1 + mrand.Intn(maxLen)
		ret = append(ret, data[:cut])
		data = data[cut:]
	}
}

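// fixedSplit splits data into consecutive chunks of length l; the final
// chunk may be shorter.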
func fixedSplit(data []byte, l int) (ret [][]byte) {
	for {
		if len(data) == 0 {
			return
		}
		if len(data) < l {
			ret = append(ret, data)
			return
		}
		ret = append(ret, data[:l])
		data = data[l:]
	}
}