github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/integration/write_quorum_test.go

//go:build integration
// +build integration

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"testing"
	"time"

	"github.com/m3db/m3/src/cluster/services"
	"github.com/m3db/m3/src/cluster/shard"
	"github.com/m3db/m3/src/dbnode/client"
	"github.com/m3db/m3/src/dbnode/integration/generate"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/topology"
	"github.com/m3db/m3/src/x/ident"
	xtime "github.com/m3db/m3/src/x/time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestNormalQuorumOnlyOneUp(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	numShards := defaultNumShards
	minShard := uint32(0)
	maxShard := uint32(numShards - 1)

	// nodes = m3db nodes
	nodes, closeFn, testWrite := makeTestWrite(t, numShards, []services.ServiceInstance{
		node(t, 0, newClusterShardsRange(minShard, maxShard, shard.Available)),
		node(t, 1, newClusterShardsRange(minShard, maxShard, shard.Available)),
		node(t, 2, newClusterShardsRange(minShard, maxShard, shard.Available)),
	}, false)
	defer closeFn()

	require.NoError(t, nodes[0].StartServer())
	defer func() { require.NoError(t, nodes[0].StopServer()) }()

	// Writes succeed to one node
	assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
	assert.Error(t, testWrite(topology.ConsistencyLevelMajority))
	assert.Error(t, testWrite(topology.ConsistencyLevelAll))
}

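// TestNormalQuorumOnlyTwoUp verifies write consistency with two of the three
// available replicas running: writes at level One and Majority succeed, while
// level All fails because the third replica is down.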
func TestNormalQuorumOnlyTwoUp(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	numShards := defaultNumShards
	minShard := uint32(0)
	maxShard := uint32(numShards - 1)

	// nodes = m3db nodes
	nodes, closeFn, testWrite := makeTestWrite(t, numShards, []services.ServiceInstance{
		node(t, 0, newClusterShardsRange(minShard, maxShard, shard.Available)),
		node(t, 1, newClusterShardsRange(minShard, maxShard, shard.Available)),
		node(t, 2, newClusterShardsRange(minShard, maxShard, shard.Available)),
	}, false)
	defer closeFn()

	require.NoError(t, nodes[0].StartServer())
	defer func() { require.NoError(t, nodes[0].StopServer()) }()
	require.NoError(t, nodes[1].StartServer())
	defer func() { require.NoError(t, nodes[1].StopServer()) }()

	// Writes succeed to two nodes
	assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
	assert.NoError(t, testWrite(topology.ConsistencyLevelMajority))
	assert.Error(t, testWrite(topology.ConsistencyLevelAll))
}

func TestNormalQuorumAllUp(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	numShards := defaultNumShards
	minShard := uint32(0)
	maxShard := uint32(numShards - 1)

	// nodes = m3db nodes
	nodes, closeFn, testWrite := makeTestWrite(t, numShards, []services.ServiceInstance{
		node(t, 0, newClusterShardsRange(minShard, maxShard, shard.Available)),
		node(t, 1, newClusterShardsRange(minShard, maxShard, shard.Available)),
		node(t, 2, newClusterShardsRange(minShard, maxShard, shard.Available)),
	}, false)
	defer closeFn()

	require.NoError(t, nodes[0].StartServer())
	defer func() { require.NoError(t, nodes[0].StopServer()) }()
	require.NoError(t, nodes[1].StartServer())
	defer func() { require.NoError(t, nodes[1].StopServer()) }()
	require.NoError(t, nodes[2].StartServer())
	defer func() { require.NoError(t, nodes[2].StopServer()) }()

	// Writes succeed to all nodes
	assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
	assert.NoError(t, testWrite(topology.ConsistencyLevelMajority))
	assert.NoError(t, testWrite(topology.ConsistencyLevelAll))
}

func TestAddNodeQuorumOnlyLeavingInitializingUp(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	numShards := defaultNumShards
	minShard := uint32(0)
	maxShard := uint32(numShards - 1)

	// nodes = m3db nodes
	nodes, closeFn, testWrite := makeTestWrite(t, numShards, []services.ServiceInstance{
		node(t, 0, newClusterShardsRange(minShard, maxShard, shard.Leaving)),
		node(t, 1, newClusterShardsRange(minShard, maxShard, shard.Available)),
		node(t, 2, newClusterShardsRange(minShard, maxShard, shard.Available)),
		node(t, 3, newClusterShardsRange(minShard, maxShard, shard.Initializing)),
	}, false)
	defer closeFn()

	require.NoError(t, nodes[0].StartServer())
	defer func() { require.NoError(t, nodes[0].StopServer()) }()

	require.NoError(t, nodes[3].StartServerDontWaitBootstrap())
	defer func() { require.NoError(t, nodes[3].StopServer()) }()

	// No writes succeed to available nodes
	assert.Error(t, testWrite(topology.ConsistencyLevelOne))
	assert.Error(t, testWrite(topology.ConsistencyLevelMajority))
	assert.Error(t, testWrite(topology.ConsistencyLevelAll))
}

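// TestAddNodeQuorumOnlyOneNormalAndLeavingInitializingUp verifies that, when
// leaving/initializing pairs do not count towards consistency, one available
// replica plus the leaving and initializing nodes only satisfies level One.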
func TestAddNodeQuorumOnlyOneNormalAndLeavingInitializingUp(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	numShards := defaultNumShards
	minShard := uint32(0)
	maxShard := uint32(numShards - 1)

	// nodes = m3db nodes
	nodes, closeFn, testWrite := makeTestWrite(t, numShards, []services.ServiceInstance{
		node(t, 0, newClusterShardsRange(minShard, maxShard, shard.Leaving)),
		node(t, 1, newClusterShardsRange(minShard, maxShard, shard.Available)),
		node(t, 2, newClusterShardsRange(minShard, maxShard, shard.Available)),
		node(t, 3, newClusterShardsRange(minShard, maxShard, shard.Initializing)),
	}, false)
	defer closeFn()

	require.NoError(t, nodes[0].StartServer())
	defer func() { require.NoError(t, nodes[0].StopServer()) }()
	require.NoError(t, nodes[1].StartServer())
	defer func() { require.NoError(t, nodes[1].StopServer()) }()
	require.NoError(t, nodes[3].StartServerDontWaitBootstrap())
	defer func() { require.NoError(t, nodes[3].StopServer()) }()

	// Writes succeed to one available node
	assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
	assert.Error(t, testWrite(topology.ConsistencyLevelMajority))
	assert.Error(t, testWrite(topology.ConsistencyLevelAll))
}

func TestReplaceNodeWithShardsLeavingAndInitializingCountTowardsConsistencySet(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	numShards := defaultNumShards
	minShard := uint32(0)
	maxShard := uint32(numShards - 1)

	initShards := newClusterShardsRange(minShard, maxShard, shard.Initializing)
	for i := minShard; i < maxShard; i++ {
		shard, _ := initShards.Shard(i)
		shard.SetSourceID("testhost0")
	}

	// nodes = m3db nodes
	nodes, closeFunc, testWrite := makeTestWrite(t, numShards, []services.ServiceInstance{
		node(t, 0, newClusterShardsRange(minShard, maxShard, shard.Leaving)),
		node(t, 1, newClusterShardsRange(minShard, maxShard, shard.Available)),
		node(t, 2, newClusterShardsRange(minShard, maxShard, shard.Available)),
		node(t, 3, initShards),
	}, true)
	defer closeFunc()

	require.NoError(t, nodes[0].StartServer())
	defer func() { require.NoError(t, nodes[0].StopServer()) }()
	require.NoError(t, nodes[1].StartServer())
	defer func() { require.NoError(t, nodes[1].StopServer()) }()
	require.NoError(t, nodes[3].StartServerDontWaitBootstrap())
	defer func() { require.NoError(t, nodes[3].StopServer()) }()

	// Writes succeed to one available node and to both the leaving and initializing nodes.
	assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
	assert.NoError(t, testWrite(topology.ConsistencyLevelMajority))
	assert.Error(t, testWrite(topology.ConsistencyLevelAll))
}

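// TestMultipleReplaceNodeWithShardsLeavingAndInitializingCountTowardsConsistencySet
// covers two simultaneous node replacements: each initializing node names a
// leaving node as its shard source, and with the client option enabled the
// writes acknowledged by these leaving/initializing pairs satisfy Majority,
// while All still fails because the remaining available node is never started.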
func TestMultipleReplaceNodeWithShardsLeavingAndInitializingCountTowardsConsistencySet(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	numShards := defaultNumShards
	minShard := uint32(0)
	maxShard := uint32(numShards - 1)

	// 1st replace with testhost0 as source node.
	initShards1 := newClusterShardsRange(minShard, maxShard, shard.Initializing)
	for i := minShard; i < maxShard; i++ {
		shard, _ := initShards1.Shard(i)
		shard.SetSourceID("testhost0")
	}

	// 2nd replace with testhost1 as source node.
	initShards2 := newClusterShardsRange(minShard, maxShard, shard.Initializing)
	for i := minShard; i < maxShard; i++ {
		shard, _ := initShards2.Shard(i)
		shard.SetSourceID("testhost1")
	}

	// nodes = m3db nodes
	nodes, closeFunc, testWrite := makeTestWrite(t, numShards, []services.ServiceInstance{
		node(t, 0, newClusterShardsRange(minShard, maxShard, shard.Leaving)),
		node(t, 1, newClusterShardsRange(minShard, maxShard, shard.Leaving)),
		node(t, 2, newClusterShardsRange(minShard, maxShard, shard.Available)),
		node(t, 3, initShards1),
		node(t, 4, initShards2),
	}, true)
	defer closeFunc()

	require.NoError(t, nodes[0].StartServer())
	defer func() { require.NoError(t, nodes[0].StopServer()) }()
	require.NoError(t, nodes[1].StartServer())
	defer func() { require.NoError(t, nodes[1].StopServer()) }()
	require.NoError(t, nodes[3].StartServerDontWaitBootstrap())
	defer func() { require.NoError(t, nodes[3].StopServer()) }()
	require.NoError(t, nodes[4].StartServerDontWaitBootstrap())
	defer func() { require.NoError(t, nodes[4].StopServer()) }()

	// Writes succeed to both leaving and initializing pairs.
	assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
	assert.NoError(t, testWrite(topology.ConsistencyLevelMajority))
	assert.Error(t, testWrite(topology.ConsistencyLevelAll))
}

func TestAddNodeQuorumAllUp(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	numShards := defaultNumShards
	minShard := uint32(0)
	maxShard := uint32(numShards - 1)

	// nodes = m3db nodes
	nodes, closeFn, testWrite := makeTestWrite(t, numShards, []services.ServiceInstance{
		node(t, 0, newClusterShardsRange(minShard, maxShard, shard.Leaving)),
		node(t, 1, newClusterShardsRange(minShard, maxShard, shard.Available)),
		node(t, 2, newClusterShardsRange(minShard, maxShard, shard.Available)),
		node(t, 3, newClusterShardsRange(minShard, maxShard, shard.Initializing)),
	}, false)
	defer closeFn()

	require.NoError(t, nodes[0].StartServer())
	defer func() { require.NoError(t, nodes[0].StopServer()) }()
	require.NoError(t, nodes[1].StartServer())
	defer func() { require.NoError(t, nodes[1].StopServer()) }()
	require.NoError(t, nodes[2].StartServer())
	defer func() { require.NoError(t, nodes[2].StopServer()) }()
	require.NoError(t, nodes[3].StartServerDontWaitBootstrap())
	defer func() { require.NoError(t, nodes[3].StopServer()) }()

	// Writes succeed to two available nodes
	assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
	assert.NoError(t, testWrite(topology.ConsistencyLevelMajority))
	assert.Error(t, testWrite(topology.ConsistencyLevelAll))
}

type testWriteFn func(topology.ConsistencyLevel) error

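// makeTestWrite builds a cluster from the given service instances, writes
// empty data files so the nodes can bootstrap, and returns the node setups, a
// cleanup function, and a testWriteFn that issues a single write at the
// requested consistency level. The boolean argument toggles the client's
// ShardsLeavingAndInitializingCountTowardsConsistency option.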
func makeTestWrite(
	t *testing.T,
	numShards int,
	instances []services.ServiceInstance,
	isShardsLeavingAndInitializingCountTowardsConsistency bool,
) (testSetups, closeFn, testWriteFn) {
	nsOpts := namespace.NewOptions()
	md, err := namespace.NewMetadata(testNamespaces[0],
		nsOpts.SetRetentionOptions(nsOpts.RetentionOptions().SetRetentionPeriod(6*time.Hour)))
	require.NoError(t, err)

	nspaces := []namespace.Metadata{md}
	nodes, topoInit, closeFn := newNodes(t, numShards, instances, nspaces, false)
	now := nodes[0].NowFn()()

	for _, node := range nodes {
		node.SetOpts(node.Opts().SetNumShards(numShards))
		for _, ns := range node.Namespaces() {
			// Write empty data files to disk so nodes can bootstrap.
			require.NoError(t, writeTestDataToDisk(ns, node, generate.SeriesBlocksByStart{}, 0))
		}
	}

	clientopts := client.NewOptions().
		SetClusterConnectConsistencyLevel(topology.ConnectConsistencyLevelNone).
		SetClusterConnectTimeout(2 * time.Second).
		SetWriteRequestTimeout(2 * time.Second).
		SetTopologyInitializer(topoInit).
		SetShardsLeavingAndInitializingCountTowardsConsistency(isShardsLeavingAndInitializingCountTowardsConsistency)

	testWrite := func(cLevel topology.ConsistencyLevel) error {
		clientopts = clientopts.SetWriteConsistencyLevel(cLevel)
		c, err := client.NewClient(clientopts)
		require.NoError(t, err)

		s, err := c.NewSession()
		require.NoError(t, err)

		return s.Write(nspaces[0].ID(), ident.StringID("quorumTest"), now, 42, xtime.Second, nil)
	}

	return nodes, closeFn, testWrite
}