github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/integration/write_read_high_concurrency_test.go

// +build integration
//
// Copyright (c) 2019 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/m3db/m3/src/cluster/services"
	"github.com/m3db/m3/src/cluster/shard"
	"github.com/m3db/m3/src/dbnode/client"
	"github.com/m3db/m3/src/dbnode/topology"
	xclock "github.com/m3db/m3/src/x/clock"
	"github.com/m3db/m3/src/x/ident"
	xtime "github.com/m3db/m3/src/x/time"

	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
)

// TestWriteReadHighConcurrencyTestMultiNS stress tests the concurrent write and read pathways in M3DB by spinning
// up hundreds of goroutines that all write to and read from M3DB. It was added as a regression test to catch bugs
// in the M3DB client batching logic and lifecycles, but it is useful for detecting various kinds of concurrency
// issues at the integration level.
func TestWriteReadHighConcurrencyTestMultiNS(t *testing.T) {
	if testing.Short() {
		t.SkipNow() // Just skip if we're doing a short run
	}
	var (
		concurrency = 100
		writeEach   = 1000
		numShards   = defaultNumShards
		minShard    = uint32(0)
		maxShard    = uint32(numShards - 1)
		instances   = []services.ServiceInstance{
			node(t, 0, newClusterShardsRange(minShard, maxShard, shard.Available)),
			node(t, 1, newClusterShardsRange(minShard, maxShard, shard.Available)),
			node(t, 2, newClusterShardsRange(minShard, maxShard, shard.Available)),
		}
	)
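	// Three nodes each own the full shard range in the Available state, and the
	// client below is configured to require acknowledgement from all replicas
	// for both writes and reads.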
	nodes, closeFn, clientopts := makeMultiNodeSetup(t, numShards, true, true, instances) //nolint:govet
	clientopts = clientopts.
		SetWriteConsistencyLevel(topology.ConsistencyLevelAll).
		SetReadConsistencyLevel(topology.ReadConsistencyLevelAll)

	defer closeFn()
	log := nodes[0].StorageOpts().InstrumentOptions().Logger()
	for _, n := range nodes {
		require.NoError(t, n.StartServer())
	}

	c, err := client.NewClient(clientopts)
	require.NoError(t, err)
	session, err := c.NewSession()
	require.NoError(t, err)
	defer session.Close()

	var insertWg sync.WaitGroup
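	// Every datapoint is written at the same timestamp, taken from the first
	// node's clock, so the fetch window used during verification (now ± 1h)
	// covers all of the written data.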
	now := xtime.ToUnixNano(nodes[0].DB().Options().ClockOptions().NowFn()())
	start := time.Now()
	log.Info("starting data write")

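	// Each worker gets its own pair of ID generators; the second namespace's
	// generator offsets both indices so the two namespaces use distinct series IDs.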
	newNs1GenIDs := func(idx int) func(j int) ident.ID {
		return func(j int) ident.ID {
			id, _ := genIDTags(idx, j, 0)
			return id
		}
	}
	newNs2GenIDs := func(idx int) func(j int) ident.ID {
		return func(j int) ident.ID {
			id, _ := genIDTags(concurrency+idx, writeEach+j, 0)
			return id
		}
	}
	for i := 0; i < concurrency; i++ {
		insertWg.Add(2)
		idx := i
		ns1GenIDs := newNs1GenIDs(idx)
		ns2GenIDs := newNs2GenIDs(idx)
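		// Each worker runs two writer goroutines, one per namespace, and each
		// goroutine writes writeEach datapoints. Any write error panics, which
		// fails the test immediately.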
		go func() {
			defer insertWg.Done()
			for j := 0; j < writeEach; j++ {
				id := ns1GenIDs(j)
				err := session.Write(testNamespaces[0], id, now, float64(1.0), xtime.Second, nil)
				if err != nil {
					panic(err)
				}
			}
		}()
		go func() {
			defer insertWg.Done()
			for j := 0; j < writeEach; j++ {
				id := ns2GenIDs(j)
				err := session.Write(testNamespaces[1], id, now, float64(1.0), xtime.Second, nil)
				if err != nil {
					panic(err)
				}
			}
		}()
	}

	insertWg.Wait()
	log.Info("test data written", zap.Duration("took", time.Since(start)))

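	// Read back every series that was written, across both namespaces, using
	// the same ID generators that drove the writes.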
	var fetchWg sync.WaitGroup
	for i := 0; i < concurrency; i++ {
		fetchWg.Add(2)
		idx := i
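		// verify polls each series with xclock.WaitUntil, requiring at least one
		// datapoint within 10 seconds and panicking with the offending ID on timeout.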
		verify := func(genID func(j int) ident.ID, ns ident.ID) {
			defer fetchWg.Done()
			for j := 0; j < writeEach; j++ {
				id := genID(j)
				found := xclock.WaitUntil(func() bool {
					iter, err := session.Fetch(ns, id, now.Add(-time.Hour), now.Add(time.Hour))
					if err != nil {
						panic(err)
					}
					return iter.Next()
				}, 10*time.Second)
				if !found {
					panic(fmt.Sprintf("timed out waiting to fetch id: %s", id))
				}
			}
		}
		go verify(newNs1GenIDs(idx), testNamespaces[0])
		go verify(newNs2GenIDs(idx), testNamespaces[1])
	}
	fetchWg.Wait()
	log.Info("data is readable", zap.Duration("took", time.Since(start)))
}