github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/integration/peers_bootstrap_index_aggregate_test.go

// +build integration

// Copyright (c) 2019 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"testing"
	"time"

	"github.com/m3db/m3/src/dbnode/integration/generate"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/retention"
	"github.com/m3db/m3/src/dbnode/storage/index"
	"github.com/m3db/m3/src/m3ninx/idx"
	idxpersist "github.com/m3db/m3/src/m3ninx/persist"
	"github.com/m3db/m3/src/x/ident"
	xtest "github.com/m3db/m3/src/x/test"

	"github.com/stretchr/testify/require"
)

func TestPeersBootstrapIndexAggregateQuery(t *testing.T) {
	if testing.Short() {
		t.SkipNow() // Just skip if we're doing a short run
	}

	log := xtest.NewLogger(t)
	blockSize := 2 * time.Hour
	rOpts := retention.NewOptions().
		SetRetentionPeriod(20 * time.Hour).
		SetBlockSize(blockSize).
		SetBufferPast(10 * time.Minute).
		SetBufferFuture(2 * time.Minute)

	idxOpts := namespace.NewIndexOptions().
		SetEnabled(true).
		SetBlockSize(2 * blockSize)
	nOpts := namespace.NewOptions().
		SetRetentionOptions(rOpts).
		SetIndexOptions(idxOpts)
	ns1, err := namespace.NewMetadata(testNamespaces[0], nOpts)
	require.NoError(t, err)
	opts := NewTestOptions(t).
		SetNamespaces([]namespace.Metadata{ns1}).
		// Use TChannel clients for writing / reading because we want to target individual nodes
		// at a time and not write/read all nodes in the cluster.
		SetUseTChannelClientForWriting(true).
		SetUseTChannelClientForReading(true)

	setupOpts := []BootstrappableTestSetupOptions{
		{DisablePeersBootstrapper: true},
		{
			DisableCommitLogBootstrapper: true,
			DisablePeersBootstrapper:     false,
		},
	}
	setups, closeFn := NewDefaultBootstrappableTestSetups(t, opts, setupOpts)
	defer closeFn()

	// Write test data for first node
	now := setups[0].NowFn()()

	fooSeries := generate.Series{
		ID:   ident.StringID("foo"),
		Tags: ident.NewTags(ident.StringTag("city", "new_york"), ident.StringTag("foo", "foo")),
	}

	barSeries := generate.Series{
		ID:   ident.StringID("bar"),
		Tags: ident.NewTags(ident.StringTag("city", "new_jersey")),
	}

	bazSeries := generate.Series{
		ID:   ident.StringID("baz"),
		Tags: ident.NewTags(ident.StringTag("city", "seattle")),
	}

	seriesMaps := generate.BlocksByStart([]generate.BlockConfig{
		{
			IDs:       []string{fooSeries.ID.String()},
			Tags:      fooSeries.Tags,
			NumPoints: 100,
			Start:     now.Add(-blockSize),
		},
		{
			IDs:       []string{barSeries.ID.String()},
			Tags:      barSeries.Tags,
			NumPoints: 100,
			Start:     now.Add(-blockSize),
		},
		{
			IDs:       []string{fooSeries.ID.String()},
			Tags:      fooSeries.Tags,
			NumPoints: 50,
			Start:     now,
		},
		{
			IDs:       []string{bazSeries.ID.String()},
			Tags:      bazSeries.Tags,
			NumPoints: 50,
			Start:     now,
		},
	})
	require.NoError(t, writeTestDataToDisk(ns1, setups[0], seriesMaps, 0))

	for blockStart, series := range seriesMaps {
		docs := generate.ToDocMetadata(series)
		require.NoError(t, writeTestIndexDataToDisk(
			ns1,
			setups[0].StorageOpts(),
			idxpersist.DefaultIndexVolumeType,
			blockStart,
			setups[0].ShardSet().AllIDs(),
			docs,
		))
	}

	// Start the first server with filesystem bootstrapper
	require.NoError(t, setups[0].StartServer())

	// Start the remaining servers with peers and filesystem bootstrappers
	setups[1:].parallel(func(s TestSetup) {
		require.NoError(t, s.StartServer())
	})
	log.Debug("servers are now up")

	// Stop the servers
	defer func() {
		setups.parallel(func(s TestSetup) {
			require.NoError(t, s.StopServer())
		})
		log.Debug("servers are now down")
	}()

	// Verify in-memory data matches what we expect
	for _, setup := range setups {
		verifySeriesMaps(t, setup, ns1.ID(), seriesMaps)
	}

	// Issue aggregate index queries to the second node which bootstrapped the metadata
	session, err := setups[1].M3DBClient().DefaultSession()
	require.NoError(t, err)

	start := now.Add(-rOpts.RetentionPeriod())
	end := now.Add(blockSize)
	queryOpts := index.AggregationOptions{
		QueryOptions: index.QueryOptions{StartInclusive: start, EndExclusive: end},
	}

	// Match all new_*r*
	regexpQuery, err := idx.NewRegexpQuery([]byte("city"), []byte("new_.*r.*"))
	require.NoError(t, err)
	iter, fetchResponse, err := session.Aggregate(ContextWithDefaultTimeout(),
		ns1.ID(), index.Query{Query: regexpQuery}, queryOpts)
	require.NoError(t, err)
	exhaustive := fetchResponse.Exhaustive
	require.True(t, exhaustive)
	defer iter.Finalize()

	verifyQueryAggregateMetadataResults(t, iter, exhaustive,
		verifyQueryAggregateMetadataResultsOptions{
			exhaustive: true,
			expected: map[tagName]aggregateTagValues{
				"city": {
					"new_jersey": struct{}{},
					"new_york":   struct{}{},
				},
				"foo": {
					"foo": struct{}{},
				},
			},
		})

	// Match all *e*e*
	regexpQuery, err = idx.NewRegexpQuery([]byte("city"), []byte(".*e.*e.*"))
	require.NoError(t, err)
	iter, fetchResponse, err = session.Aggregate(ContextWithDefaultTimeout(),
		ns1.ID(), index.Query{Query: regexpQuery}, queryOpts)
	require.NoError(t, err)
	exhaustive = fetchResponse.Exhaustive
	defer iter.Finalize()

	verifyQueryAggregateMetadataResults(t, iter, exhaustive,
		verifyQueryAggregateMetadataResultsOptions{
			exhaustive: true,
			expected: map[tagName]aggregateTagValues{
				"city": {
					"new_jersey": struct{}{},
					"seattle":    struct{}{},
				},
			},
		})

	// Now test term filtering, match all new_*r*, filtering on `foo`
	regexpQuery, err = idx.NewRegexpQuery([]byte("city"), []byte("new_.*r.*"))
	require.NoError(t, err)
	queryOpts.FieldFilter = index.AggregateFieldFilter([][]byte{[]byte("foo")})
	iter, fetchResponse, err = session.Aggregate(ContextWithDefaultTimeout(),
		ns1.ID(), index.Query{Query: regexpQuery}, queryOpts)
	require.NoError(t, err)
	exhaustive = fetchResponse.Exhaustive
	require.True(t, exhaustive)
	defer iter.Finalize()

	verifyQueryAggregateMetadataResults(t, iter, exhaustive,
		verifyQueryAggregateMetadataResultsOptions{
			exhaustive: true,
			expected: map[tagName]aggregateTagValues{
				"foo": {
					"foo": struct{}{},
				},
			},
		})

	// Now test term filter and tag name filtering, match all new_*r*, names only, filtering on `city`
	regexpQuery, err = idx.NewRegexpQuery([]byte("city"), []byte("new_.*r.*"))
	require.NoError(t, err)
	queryOpts.FieldFilter = index.AggregateFieldFilter([][]byte{[]byte("city")})
	queryOpts.Type = index.AggregateTagNames
	iter, fetchResponse, err = session.Aggregate(ContextWithDefaultTimeout(),
		ns1.ID(), index.Query{Query: regexpQuery}, queryOpts)
	require.NoError(t, err)
	exhaustive = fetchResponse.Exhaustive
	require.True(t, exhaustive)
	defer iter.Finalize()

	verifyQueryAggregateMetadataResults(t, iter, exhaustive,
		verifyQueryAggregateMetadataResultsOptions{
			exhaustive: true,
			expected: map[tagName]aggregateTagValues{
				"city": nil,
			},
		})
}