github.com/matrixorigin/matrixone@v1.2.0/pkg/util/export/etl/csv_test.go

// Copyright 2022 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etl

import (
	"context"
	"os"
	"path"
	"path/filepath"
	"testing"

	"github.com/matrixorigin/matrixone/pkg/common/moerr"
	"github.com/matrixorigin/matrixone/pkg/common/mpool"
	"github.com/matrixorigin/matrixone/pkg/fileservice"
	"github.com/matrixorigin/matrixone/pkg/testutil"
	"github.com/matrixorigin/matrixone/pkg/util/toml"
	"github.com/stretchr/testify/require"
)

func TestLocalFSWriter(t *testing.T) {
	ex, err := os.Executable()
	require.Equal(t, nil, err)
	t.Logf("executor: %s", ex)
	selfDir, err := filepath.Abs(".")
	require.Equal(t, nil, err)
	basedir := t.TempDir()
	t.Logf("whereami: %s, %s", selfDir, basedir)

	ctx := context.Background()
	memCacheCapacity := toml.ByteSize(mpool.MB)
	fs, err := fileservice.NewLocalFS(ctx, "test", path.Join(basedir, "system"), fileservice.CacheConfig{
		MemoryCapacity: &memCacheCapacity,
	}, nil)
	require.Equal(t, nil, err)
	// sample run output:
	// csv_test.go:23: whereami: /private/var/folders/lw/05zz3bq12djbnhv1wyzk2jgh0000gn/T/GoLand
	// csv_test.go:40: write statement file error: size not match
	// csv_test.go:50: write span file error: file existed
	// file result: (has checksum)
	// 9a80 8760 3132 3334 3536 3738 0a    ...`12345678.
	err = fs.Write(ctx, fileservice.IOVector{ // write-once-read-multi
		FilePath: "statement_node_uuid_20220818_000000_123456",
		Entries: []fileservice.IOEntry{
			{
				Size: 4,
				Data: []byte("1234"),
			},
			{
				Offset: 4, // offset is important
				Size:   4,
				Data:   []byte("5678"),
			},
		},
	})
	t.Logf("write statement file error: %v", err)
	require.Equal(t, nil, err)
	err = fs.Write(ctx, fileservice.IOVector{
		FilePath: "statement_node_uuid_20220818_000000_123456",
		Entries: []fileservice.IOEntry{
			{
				Offset: 8, // offset is important
				Size:   4,
				Data:   []byte("321"),
			},
		},
	})
	t.Logf("write statement file error: %v", err)
	require.True(t, moerr.IsMoErrCode(err, moerr.ErrFileAlreadyExists))
	// file result: (has checksum)
	// 3f3f 3a3f 3132 3334 0a    ??:?1234.
	err = fs.Write(ctx, fileservice.IOVector{
		FilePath: "span_node_uuid_20220818_000000_123456", // each file can be opened for writing only once.
		Entries: []fileservice.IOEntry{
			{
				Size: 4,
				Data: []byte("1234"),
			},
		},
	})
	t.Logf("write span file error: %v", err)
}

func TestLocalETLFSWriter(t *testing.T) {
	basedir := t.TempDir()
	fs, err := fileservice.NewLocalETLFS("etl", basedir)
	require.Equal(t, nil, err)

	// file result: (without checksum)
	ctx := context.Background()
	err = fs.Write(ctx, fileservice.IOVector{ // write-once-read-multi
		FilePath: "statement_node_uuid_20220818_000000_123456",
		Entries: []fileservice.IOEntry{
			{
				Size: 4,
				Data: []byte("1234"),
			},
			{
				Offset: 4, // offset is important
				Size:   4,
				Data:   []byte("5678"),
			},
		},
	})
	t.Logf("write statement file error: %v", err)
	require.Equal(t, nil, err)

	err = fs.Write(ctx, fileservice.IOVector{
		FilePath: "statement_node_uuid_20220818_000000_123456",
		Entries: []fileservice.IOEntry{
			{
				Offset: 8, // offset is important
				Size:   4,
				Data:   []byte("321"),
			},
		},
	})
	t.Logf("write statement file error: %v", err)
	require.True(t, moerr.IsMoErrCode(err, moerr.ErrFileAlreadyExists))

	err = fs.Write(ctx, fileservice.IOVector{
		FilePath: "span_node_uuid_20220818_000000_123456", // each file can be opened for writing only once.
		Entries: []fileservice.IOEntry{
			{
				Size: 4,
				Data: []byte("1234"),
			},
		},
	})
	t.Logf("write span file error: %v", err)
}

func TestFSWriter_Write(t *testing.T) {
	type fields struct {
		ctx      context.Context
		fs       fileservice.FileService
		table    string
		database string
		nodeUUID string
		nodeType string
	}
	type args struct {
		p []byte
	}

	fs := testutil.NewSharedFS()
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantN   int
		wantErr bool
	}{
		{
			name: "local_fs",
			fields: fields{
				ctx:      context.Background(),
				fs:       fs,
				table:    "dummy",
				database: "system",
				nodeUUID: "node_uuid",
				nodeType: "standalone",
			},
			args: args{
				p: []byte(`0000000000000001,00000000-0000-0000-0000-000000000001,0000000000000000,node_uuid,Node,span1,1970-01-01 00:00:00.000000,1970-01-01 00:00:00.000001,1000,"{""Node"":{""node_uuid"":""node_uuid"",""node_type"":""Standalone""},""version"":""v0.test.0""}"
0000000000000002,00000000-0000-0000-0000-000000000001,0000000000000000,node_uuid,Node,span2,1970-01-01 00:00:00.000001,1970-01-01 00:00:00.001000,999000,"{""Node"":{""node_uuid"":""node_uuid"",""node_type"":""Standalone""},""version"":""v0.test.0""}"
`,
				)},
			wantN:   500,
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			w := NewFSWriter(tt.fields.ctx, tt.fields.fs,
				WithFilePath("filepath"),
			)
			gotN, err := w.Write(tt.args.p)
			if (err != nil) != tt.wantErr {
				t.Errorf("Write() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if gotN != tt.wantN {
				t.Errorf("Write() gotN = %v, want %v", gotN, tt.wantN)
			}
		})
	}
}
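The tests above all exercise the same contract: a FilePath in the fileservice is write-once-read-multi, so a second Write to an existing path fails with moerr.ErrFileAlreadyExists, and each IOEntry's Offset/Size must describe the exact slice of the file it covers. Below is a minimal sketch of that pattern outside the test harness, using only the APIs the tests themselves call (NewLocalETLFS, IOVector/IOEntry, IsMoErrCode); the file names, temp directory, and error handling are illustrative, not part of the original test file.

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/matrixorigin/matrixone/pkg/common/moerr"
	"github.com/matrixorigin/matrixone/pkg/fileservice"
)

func main() {
	ctx := context.Background()
	dir, _ := os.MkdirTemp("", "etl-demo") // illustrative temp dir
	defer os.RemoveAll(dir)

	// A local ETL FileService, as in TestLocalETLFSWriter above.
	fs, err := fileservice.NewLocalETLFS("etl", dir)
	if err != nil {
		panic(err)
	}

	// First write succeeds: the entries are laid out by Offset/Size.
	err = fs.Write(ctx, fileservice.IOVector{
		FilePath: "statement_demo",
		Entries: []fileservice.IOEntry{
			{Offset: 0, Size: 4, Data: []byte("1234")},
			{Offset: 4, Size: 4, Data: []byte("5678")},
		},
	})
	fmt.Println("first write:", err) // expected: <nil>

	// A second write to the same path violates write-once-read-multi.
	err = fs.Write(ctx, fileservice.IOVector{
		FilePath: "statement_demo",
		Entries:  []fileservice.IOEntry{{Size: 4, Data: []byte("abcd")}},
	})
	fmt.Println("second write already exists:",
		moerr.IsMoErrCode(err, moerr.ErrFileAlreadyExists)) // expected: true
}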