github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/integration/disk_coldflush_test.go

// +build integration

// Copyright (c) 2019 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
    "testing"
    "time"

    "github.com/m3db/m3/src/dbnode/integration/generate"
    "github.com/m3db/m3/src/dbnode/namespace"
    "github.com/m3db/m3/src/dbnode/persist/fs"
    "github.com/m3db/m3/src/x/ident"
    xtime "github.com/m3db/m3/src/x/time"

    "github.com/stretchr/testify/require"
)

func TestDiskColdFlushSimple(t *testing.T) {
    if testing.Short() {
        t.SkipNow() // Just skip if we're doing a short run.
    }
    // Test setup with a cold-writes-enabled namespace.
    nsOpts := namespace.NewOptions().
        SetRepairEnabled(false).
        SetRetentionOptions(DefaultIntegrationTestRetentionOpts.
            SetRetentionPeriod(12 * time.Hour)).
        SetColdWritesEnabled(true)
    nsID := ident.StringID("testColdWriteNs1")
    ns, err := namespace.NewMetadata(nsID, nsOpts)
    require.NoError(t, err)
    testOpts := NewTestOptions(t).
        SetTickMinimumInterval(time.Second).
        SetNamespaces([]namespace.Metadata{ns})

    testSetup, err := NewTestSetup(t, testOpts, nil)
    require.NoError(t, err)
    defer testSetup.Close()

    md := testSetup.NamespaceMetadataOrFail(nsID)
    ropts := md.Options().RetentionOptions()
    blockSize := ropts.BlockSize()
    filePathPrefix := testSetup.StorageOpts().CommitLogOptions().FilesystemOptions().FilePathPrefix()

    // Start the server.
    log := testSetup.StorageOpts().InstrumentOptions().Logger()
    log.Debug("disk coldflush test")
    require.NoError(t, testSetup.StartServer())
    log.Debug("server is now up")

    // Stop the server.
    defer func() {
        require.NoError(t, testSetup.StopServer())
        log.Debug("server is now down")
    }()

    // Write warm data first so that cold data will flush.
    start := testSetup.NowFn()()
    seriesMaps := make(map[xtime.UnixNano]generate.SeriesBlock)
    warmData := []generate.BlockConfig{
        {IDs: []string{"warm1", "warm2"}, NumPoints: 100, Start: start},
        // The `coldOverwrite` series data produced here will be completely
        // overwritten when cold data is flushed later in the test. To satisfy
        // this assumption, `coldOverwrite` needs to be in the same block with
        // equal or fewer NumPoints than its corresponding cold data. Since the
        // `coldOverwrite` warm data is later overwritten, we remove it from
        // the expected `seriesMaps`.
        {IDs: []string{"warm1", "warm3", "coldOverwrite"}, NumPoints: 50, Start: start.Add(blockSize)},
    }
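    // The hardcoded Shard values below are presumed to come from how the test
    // setup's shard set hashes each series ID (e.g. "warm1" mapping to shard
    // 6); if the hash function or number of shards changes, these
    // expectations would need updating.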
    expectedDataFiles := []fs.FileSetFileIdentifier{
        {
            // warm1, start
            Namespace:   nsID,
            Shard:       6,
            BlockStart:  start,
            VolumeIndex: 0,
        },
        {
            // warm1, start + 1
            Namespace:   nsID,
            Shard:       6,
            BlockStart:  start.Add(blockSize),
            VolumeIndex: 0,
        },
        {
            // warm2, start
            Namespace:   nsID,
            Shard:       11,
            BlockStart:  start,
            VolumeIndex: 0,
        },
        {
            // warm3, start + 1
            Namespace:   nsID,
            Shard:       2,
            BlockStart:  start.Add(blockSize),
            VolumeIndex: 0,
        },
        {
            // coldOverwrite, start + 1
            Namespace:   nsID,
            Shard:       8,
            BlockStart:  start.Add(blockSize),
            VolumeIndex: 0,
        },
    }
    for _, input := range warmData {
        testSetup.SetNowFn(input.Start)
        testData := generate.Block(input)
        seriesMaps[input.Start] = testData
        require.NoError(t, testSetup.WriteBatch(nsID, testData))
    }
    startPlusOneBlockNano := start.Add(blockSize)
    // Remove warm data for `coldOverwrite`. See the earlier comment for context.
    seriesMaps[startPlusOneBlockNano] =
        seriesMaps[startPlusOneBlockNano][:len(seriesMaps[startPlusOneBlockNano])-1]
    log.Debug("warm data is now written")

    // Advance time to make sure all data is flushed. Because data is flushed
    // to disk asynchronously, we need to poll to check when it has been
    // written.
    testSetup.SetNowFn(testSetup.NowFn()().Add(blockSize * 2))
    maxWaitTime := time.Minute
    require.NoError(t, waitUntilFileSetFilesExist(filePathPrefix, expectedDataFiles, maxWaitTime))

    // Verify that the on-disk data matches what we expect.
    verifyFlushedDataFiles(t, testSetup.ShardSet(), testSetup.StorageOpts(), nsID, seriesMaps)

    coldData := []generate.BlockConfig{
        {IDs: []string{"cold0"}, NumPoints: 80, Start: start.Add(-blockSize)},
        {IDs: []string{"cold1", "cold2", "cold3"}, NumPoints: 30, Start: start},
        {IDs: []string{"cold1", "cold3", "coldOverwrite"}, NumPoints: 100, Start: start.Add(blockSize)},
    }
    // Set "now" to start + 3 * blockSize so that the above are cold writes.
    testSetup.SetNowFn(start.Add(blockSize * 3))
    for _, input := range coldData {
        testData := generate.Block(input)
        seriesMaps[input.Start] = append(seriesMaps[input.Start], testData...)
        require.NoError(t, testSetup.WriteBatch(nsID, testData))
    }
    log.Debug("cold data is now written")
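    // Judging by the expectations below, warm flushes write volume index 0 of
    // a block's fileset while cold flushes write the next volume index, so
    // every fileset produced by the cold writes above is expected at volume
    // index 1.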
    expectedDataFiles = []fs.FileSetFileIdentifier{
        {
            // warm1, start
            Namespace:   nsID,
            Shard:       6,
            BlockStart:  start,
            VolumeIndex: 0,
        },
        {
            // warm2, start (creating volume 0)
            // cold3, start (creating volume 1)
            Namespace:   nsID,
            Shard:       11,
            BlockStart:  start,
            VolumeIndex: 1,
        },
        {
            // warm1, start + 1
            Namespace:   nsID,
            Shard:       6,
            BlockStart:  start.Add(blockSize),
            VolumeIndex: 0,
        },
        {
            // warm3, start + 1
            Namespace:   nsID,
            Shard:       2,
            BlockStart:  start.Add(blockSize),
            VolumeIndex: 0,
        },
        {
            // cold0, start - 1
            Namespace:   nsID,
            Shard:       2,
            BlockStart:  start.Add(-blockSize),
            VolumeIndex: 1,
        },
        {
            // cold1, start
            Namespace:   nsID,
            Shard:       4,
            BlockStart:  start,
            VolumeIndex: 1,
        },
        {
            // cold2, start
            Namespace:   nsID,
            Shard:       7,
            BlockStart:  start,
            VolumeIndex: 1,
        },
        {
            // cold1, start + 1
            Namespace:   nsID,
            Shard:       4,
            BlockStart:  start.Add(blockSize),
            VolumeIndex: 1,
        },
        {
            // cold3, start + 1
            Namespace:   nsID,
            Shard:       11,
            BlockStart:  start.Add(blockSize),
            VolumeIndex: 1,
        },
        {
            // coldOverwrite, start + 1
            Namespace:   nsID,
            Shard:       8,
            BlockStart:  start.Add(blockSize),
            VolumeIndex: 1,
        },
    }

    require.NoError(t, waitUntilFileSetFilesExist(filePathPrefix, expectedDataFiles, maxWaitTime))

    // Verify that the on-disk data matches what we expect.
    verifyFlushedDataFiles(t, testSetup.ShardSet(), testSetup.StorageOpts(), nsID, seriesMaps)
}
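
// NB: this file is guarded by the `integration` build tag, so the test above
// only runs with that tag enabled, e.g. via something like the following
// (exact invocation may differ, for instance through the repo's Makefile):
//
//	go test -tags integration -run TestDiskColdFlushSimple ./src/dbnode/integration/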