github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/integration/disk_cleanup_index_corrupted_test.go

//go:build integration
// +build integration

// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"os"
	"reflect"
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/persist/fs"
	"github.com/m3db/m3/src/dbnode/retention"
	xclock "github.com/m3db/m3/src/x/clock"
	xtime "github.com/m3db/m3/src/x/time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestDiskCleanupIndexCorrupted(t *testing.T) {
	if testing.Short() {
		t.SkipNow() // Just skip if we're doing a short run
	}

	var (
		rOpts        = retention.NewOptions().SetRetentionPeriod(48 * time.Hour)
		nsBlockSize  = time.Hour
		idxBlockSize = 2 * time.Hour

		nsROpts = rOpts.SetBlockSize(nsBlockSize)
		idxOpts = namespace.NewIndexOptions().SetBlockSize(idxBlockSize).SetEnabled(true)
		nsOpts  = namespace.NewOptions().
			SetCleanupEnabled(true).
			SetRetentionOptions(nsROpts).
			SetIndexOptions(idxOpts)
	)

	ns, err := namespace.NewMetadata(testNamespaces[0], nsOpts)
	require.NoError(t, err)

	opts := NewTestOptions(t).
		SetNamespaces([]namespace.Metadata{ns})
	setup, err := NewTestSetup(t, opts, nil)
	require.NoError(t, err)
	defer setup.Close()

	filePathPrefix := setup.StorageOpts().CommitLogOptions().FilesystemOptions().FilePathPrefix()

	// Now create some fileset files
	var (
		filesetsIdentifiers = make([]fs.FileSetFileIdentifier, 0)
		numVolumes          = 4

		now         = setup.NowFn()().Truncate(idxBlockSize)
		blockStarts = []xtime.UnixNano{
			now.Add(-3 * idxBlockSize),
			now.Add(-2 * idxBlockSize),
			now.Add(-1 * idxBlockSize),
		}
	)
	for _, blockStart := range blockStarts {
		for idx := 0; idx < numVolumes; idx++ {
			filesetsIdentifiers = append(filesetsIdentifiers, fs.FileSetFileIdentifier{
				Namespace:   ns.ID(),
				BlockStart:  blockStart,
				VolumeIndex: idx,
			})
		}
	}
	writeIndexFileSetFiles(t, setup.StorageOpts(), ns, filesetsIdentifiers)

	filesets := fs.ReadIndexInfoFiles(fs.ReadIndexInfoFilesOptions{
		FilePathPrefix:   filePathPrefix,
		Namespace:        ns.ID(),
		ReaderBufferSize: setup.FilesystemOpts().InfoReaderBufferSize(),
	})
	require.Len(t, filesets, len(blockStarts)*numVolumes)

	filesThatShouldBeKept := make([]string, 0)
	keep := func(files []string) {
		filesThatShouldBeKept = append(filesThatShouldBeKept, files...)
	}
	// Corrupt some filesets.
	forBlockStart(blockStarts[0], filesets, func(filesets []fs.ReadIndexInfoFileResult) {
		keep(missingDigest(t, filesets[0])) // most recent volume index for volume type
		corruptedInfo(t, filesets[1])
		missingInfo(t, filesets[2])
		keep(corruptedInfo(t, filesets[3])) // corrupted info files are kept if it's the most recent volume index
	})

	forBlockStart(blockStarts[1], filesets, func(filesets []fs.ReadIndexInfoFileResult) {
		keep(filesets[0].AbsoluteFilePaths)
		missingDigest(t, filesets[1])
		missingDigest(t, filesets[2])
		keep(missingDigest(t, filesets[3])) // most recent volume index for volume type
	})

	forBlockStart(blockStarts[2], filesets, func(filesets []fs.ReadIndexInfoFileResult) {
		missingInfo(t, filesets[0])
		corruptedInfo(t, filesets[1])
		missingDigest(t, filesets[2])
		keep(filesets[3].AbsoluteFilePaths) // most recent volume index for volume type
	})
	sort.Strings(filesThatShouldBeKept)

	// Start the server
	log := setup.StorageOpts().InstrumentOptions().Logger()
	require.NoError(t, setup.StartServer())
	log.Debug("server is now up")

	// Stop the server
	defer func() {
		require.NoError(t, setup.StopServer())
		log.Debug("server is now down")
	}()

	// Check if corrupted files have been deleted
	waitTimeout := 30 * time.Second
	deleted := xclock.WaitUntil(func() bool {
		files, err := fs.IndexFileSetsBefore(filePathPrefix, ns.ID(), now.Add(time.Minute))
		require.NoError(t, err)
		sort.Strings(files)
		return reflect.DeepEqual(files, filesThatShouldBeKept)
	}, waitTimeout)
	if !assert.True(t, deleted) {
		files, err := fs.IndexFileSetsBefore(filePathPrefix, ns.ID(), now.Add(time.Minute))
		require.NoError(t, err)
		sort.Strings(files)
		require.Equal(t, filesThatShouldBeKept, files)
	}
}

// forBlockStart filters filesets down to those whose block start equals
// blockStart, sorts them by volume index, and passes the result to fn.
func forBlockStart(
	blockStart xtime.UnixNano,
	filesets []fs.ReadIndexInfoFileResult,
	fn func([]fs.ReadIndexInfoFileResult),
) {
	res := make([]fs.ReadIndexInfoFileResult, 0)
	for _, f := range filesets {
		if f.ID.BlockStart.Equal(blockStart) {
			res = append(res, f)
		}
	}
	sort.Slice(res, func(i, j int) bool {
		return res[i].ID.VolumeIndex < res[j].ID.VolumeIndex
	})
	fn(res)
}

// corruptedInfo truncates the fileset's info file to zero bytes and returns
// the fileset's full set of file paths.
func corruptedInfo(t *testing.T, fileset fs.ReadIndexInfoFileResult) []string {
	for _, f := range fileset.AbsoluteFilePaths {
		if strings.Contains(f, "info.db") {
			require.NoError(t, os.Truncate(f, 0))
			return fileset.AbsoluteFilePaths
		}
	}
	require.Fail(t, "could not find info file")
	return nil
}

// missingInfo removes the fileset's info file and returns the remaining file paths.
func missingInfo(t *testing.T, fileset fs.ReadIndexInfoFileResult) []string { //nolint:unparam
	for i, f := range fileset.AbsoluteFilePaths {
		if strings.Contains(f, "info.db") {
			require.NoError(t, os.Remove(f))
			res := make([]string, 0)
			res = append(res, fileset.AbsoluteFilePaths[:i]...)
			res = append(res, fileset.AbsoluteFilePaths[i+1:]...)
			return res
		}
	}
	require.Fail(t, "could not find info file")
	return nil
}

// missingDigest removes the fileset's digest file and returns the remaining file paths.
func missingDigest(t *testing.T, fileset fs.ReadIndexInfoFileResult) []string {
	for i, f := range fileset.AbsoluteFilePaths {
		if strings.Contains(f, "digest.db") {
			require.NoError(t, os.Remove(f))
			res := make([]string, 0)
			res = append(res, fileset.AbsoluteFilePaths[:i]...)
			res = append(res, fileset.AbsoluteFilePaths[i+1:]...)
			return res
		}
	}
	require.Fail(t, "could not find digest file")
	return nil
}