github.com/matrixorigin/matrixone@v1.2.0/pkg/fileservice/mem_cache_test.go

// Copyright 2022 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileservice

import (
	"context"
	"fmt"
	"io"
	"runtime"
	"sync"
	"testing"

	"github.com/matrixorigin/matrixone/pkg/fileservice/memorycache"
	"github.com/matrixorigin/matrixone/pkg/perfcounter"
	"github.com/stretchr/testify/assert"
)

func TestMemCacheLeak(t *testing.T) {
	ctx := context.Background()
	var counter perfcounter.CounterSet
	ctx = perfcounter.WithCounterSet(ctx, &counter)

	fs, err := NewMemoryFS("test", DisabledCacheConfig, nil)
	assert.Nil(t, err)
	err = fs.Write(ctx, IOVector{
		FilePath: "foo",
		Entries: []IOEntry{
			{
				Size: 3,
				Data: []byte("foo"),
			},
		},
	})
	assert.Nil(t, err)

	size := int64(4 * runtime.GOMAXPROCS(0))
	m := NewMemCache(NewMemoryCache(size, true, nil), nil)

	vec := &IOVector{
		FilePath: "foo",
		Entries: []IOEntry{
			{
				Size: 3,
				ToCacheData: func(reader io.Reader, data []byte, allocator CacheDataAllocator) (memorycache.CacheData, error) {
					cacheData := allocator.Alloc(1)
					cacheData.Bytes()[0] = 42
					return cacheData, nil
				},
			},
		},
	}
	err = m.Read(ctx, vec)
	assert.Nil(t, err)
	vec.Release()
	err = fs.Read(ctx, vec)
	assert.Nil(t, err)
	err = m.Update(ctx, vec, false)
	assert.Nil(t, err)
	vec.Release()
	assert.Equal(t, int64(1), m.cache.Capacity()-m.cache.Available())
	assert.Equal(t, int64(size), counter.FileService.Cache.Memory.Available.Load())
	assert.Equal(t, int64(0), counter.FileService.Cache.Memory.Used.Load())

	// read from cache
	vec = &IOVector{
		FilePath: "foo",
		Entries: []IOEntry{
			{
				Size: 3,
				ToCacheData: func(reader io.Reader, data []byte, allocator CacheDataAllocator) (memorycache.CacheData, error) {
					cacheData := allocator.Alloc(1)
					cacheData.Bytes()[0] = 42
					return cacheData, nil
				},
			},
		},
	}
	err = m.Read(ctx, vec)
	assert.Nil(t, err)
	vec.Release()
	err = fs.Read(ctx, vec)
	assert.Nil(t, err)
	err = m.Update(ctx, vec, false)
	assert.Nil(t, err)
	vec.Release()
	assert.Equal(t, int64(1), m.cache.Capacity()-m.cache.Available())
	assert.Equal(t, int64(size)-1, counter.FileService.Cache.Memory.Available.Load())
	assert.Equal(t, int64(1), counter.FileService.Cache.Memory.Used.Load())
}

// TestHighConcurrency mainly exercises concurrency issues in objectCache
// and the dataOverlap-checker.
func TestHighConcurrency(t *testing.T) {
	m := NewMemCache(NewMemoryCache(2, true, nil), nil)
	ctx := context.Background()

	n := 10

	var vecArr []*IOVector
	for i := 0; i < n; i++ {
		vecArr = append(vecArr,
			&IOVector{
				FilePath: fmt.Sprintf("key%d", i),
				Entries: []IOEntry{
					{
						Size: 4,
						Data: []byte(fmt.Sprintf("val%d", i)),
					},
				},
			},
		)
	}

	// spin up n goroutines
	wg := sync.WaitGroup{}
	wg.Add(n)

	for i := 0; i < n; i++ {
		// Each goroutine tries to insert vecArr[i] 10 times.
		// These updates should invoke postSet and postEvict inside objectCache.
		// Since postSet and postEvict are guarded by the objectCache mutex, this
		// test should not trigger concurrency-related panics.
		go func(idx int) {
			for j := 0; j < 10; j++ {
				_ = m.Update(ctx, vecArr[idx], false)
			}
			wg.Done()
		}(i)
	}
	wg.Wait()
}
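The reasoning in TestHighConcurrency hinges on postSet and postEvict being invoked only while the objectCache mutex is held. The snippet below is a minimal sketch of that guarded-callback pattern, not the actual objectCache implementation; illustrativeCache, newIllustrativeCache, and the callback fields are hypothetical names used for illustration only.

// Hypothetical sketch, not part of mem_cache_test.go: callbacks fire while
// the cache lock is held, so concurrent Set calls cannot race inside them.
type illustrativeCache struct {
	mu        sync.Mutex
	capacity  int
	items     map[string][]byte
	postSet   func(key string, value []byte)
	postEvict func(key string, value []byte)
}

func newIllustrativeCache(capacity int) *illustrativeCache {
	return &illustrativeCache{
		capacity: capacity,
		items:    make(map[string][]byte),
	}
}

func (c *illustrativeCache) Set(key string, value []byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	// Evict an arbitrary entry while at or over capacity; postEvict runs
	// under the same lock that protects the map.
	for len(c.items) > 0 && len(c.items) >= c.capacity {
		for k, v := range c.items {
			delete(c.items, k)
			if c.postEvict != nil {
				c.postEvict(k, v)
			}
			break
		}
	}
	c.items[key] = value
	if c.postSet != nil {
		c.postSet(key, value)
	}
}

Under a scheme like this, the ten goroutines in the test can update the cache concurrently while the callbacks still observe a consistent map, which is what the "should not trigger concurrency-related panics" expectation relies on.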