github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/integration/index_multiple_node_high_concurrency_test.go

// +build integration
//
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"fmt"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/m3db/m3/src/cluster/services"
	"github.com/m3db/m3/src/cluster/shard"
	"github.com/m3db/m3/src/dbnode/client"
	"github.com/m3db/m3/src/dbnode/topology"
	xclock "github.com/m3db/m3/src/x/clock"
	xtime "github.com/m3db/m3/src/x/time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
)

func TestIndexMultipleNodeHighConcurrency(t *testing.T) {
	if testing.Short() {
		t.SkipNow() // Just skip if we're doing a short run
	}
	var (
		concurrency = 10
		writeEach   = 100
		numTags     = 10
	)

	// Run the full write-then-query flow once per read consistency level.
	levels := []topology.ReadConsistencyLevel{
		topology.ReadConsistencyLevelOne,
		topology.ReadConsistencyLevelUnstrictMajority,
		topology.ReadConsistencyLevelMajority,
		topology.ReadConsistencyLevelUnstrictAll,
		topology.ReadConsistencyLevelAll,
	}
	for _, lvl := range levels {
		t.Run(
			fmt.Sprintf("running test for %v", lvl),
			func(t *testing.T) {
				numShards := defaultNumShards
				minShard := uint32(0)
				maxShard := uint32(numShards - 1)

				// Three nodes, each owning every shard in the range.
				instances := []services.ServiceInstance{
					node(t, 0, newClusterShardsRange(minShard, maxShard, shard.Available)),
					node(t, 1, newClusterShardsRange(minShard, maxShard, shard.Available)),
					node(t, 2, newClusterShardsRange(minShard, maxShard, shard.Available)),
				}
				// nodes = m3db nodes
				nodes, closeFn, clientopts := makeMultiNodeSetup(t, numShards, true, true, instances) //nolint:govet
				clientopts = clientopts.SetReadConsistencyLevel(lvl)

				defer closeFn()
				log := nodes[0].StorageOpts().InstrumentOptions().Logger()
				// Start the nodes
				for _, n := range nodes {
					require.NoError(t, n.StartServer())
				}

				c, err := client.NewClient(clientopts)
				require.NoError(t, err)
				session, err := c.NewSession()
				require.NoError(t, err)
				defer session.Close()

				var (
					insertWg       sync.WaitGroup
					numTotalErrors uint32
				)
				now := xtime.ToUnixNano(nodes[0].DB().Options().ClockOptions().NowFn()())
				start := time.Now()
				log.Info("starting data write")

				// Write tagged series concurrently: each of the `concurrency`
				// goroutines writes `writeEach` distinct series.
				for i := 0; i < concurrency; i++ {
					insertWg.Add(1)
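					// Capture the loop variable for the goroutine closure
					// (loop variables are shared across iterations before Go 1.22).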
					idx := i
					go func() {
						numErrors := uint32(0)
						for j := 0; j < writeEach; j++ {
							id, tags := genIDTags(idx, j, numTags)
							err := session.WriteTagged(testNamespaces[0], id, tags, now, float64(1.0), xtime.Second, nil)
							if err != nil {
								numErrors++
							}
						}
						atomic.AddUint32(&numTotalErrors, numErrors)
						insertWg.Done()
					}()
				}

				insertWg.Wait()
				require.Zero(t, numTotalErrors)
				log.Info("test data written", zap.Duration("took", time.Since(start)))
				log.Info("waiting to see if data is indexed")

				// For each writer, poll until its last written series is
				// retrievable via the index, or fail after indexTimeout.
				var (
					indexTimeout = 10 * time.Second
					fetchWg      sync.WaitGroup
				)
				for i := 0; i < concurrency; i++ {
					fetchWg.Add(1)
					idx := i
					go func() {
						id, tags := genIDTags(idx, writeEach-1, numTags)
						indexed := xclock.WaitUntil(func() bool {
							found := isIndexed(t, session, testNamespaces[0], id, tags)
							return found
						}, indexTimeout)
						assert.True(t, indexed, "timed out waiting for index retrieval")
						fetchWg.Done()
					}()
				}
				fetchWg.Wait()
				log.Info("data is indexed", zap.Duration("took", time.Since(start)))
			})
	}
}