github.com/m3db/m3@v1.5.0/src/dbnode/client/session_fetch_high_concurrency_test.go

// Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package client

import (
	"fmt"
	"math"
	"math/rand"
	"sync"
	"testing"
	"time"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/m3db/m3/src/cluster/shard"
	"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
	"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
	"github.com/m3db/m3/src/dbnode/sharding"
	"github.com/m3db/m3/src/dbnode/topology"
	"github.com/m3db/m3/src/dbnode/ts"
	"github.com/m3db/m3/src/x/ident"
	xtime "github.com/m3db/m3/src/x/time"
)

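// TestSessionFetchIDsHighConcurrency runs many concurrent FetchIDs calls
// against a fully mocked, replicated topology and verifies they all
// complete cleanly.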
func TestSessionFetchIDsHighConcurrency(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

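	// Size of the test topology: shard count, replication factor, and host count.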
	numShards := 1024
	numReplicas := 3
	numHosts := 8

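	// Fetch workload: each of the concurrent goroutines issues fetchAllEach fetches.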
	concurrency := 4
	fetchAllEach := 128

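	// Build two ID sets of different sizes; maxIDs tracks the larger so the
	// mocked fetch response has enough elements for either request.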
	maxIDs := 0
	fetchAllTypes := []struct {
		numIDs int
		ids    []string
	}{
		{numIDs: 16},
		{numIDs: 32},
	}
	for i := range fetchAllTypes {
		for j := 0; j < fetchAllTypes[i].numIDs; j++ {
			fetchAllTypes[i].ids = append(fetchAllTypes[i].ids, fmt.Sprintf("foo.%d", j))
		}
		if fetchAllTypes[i].numIDs > maxIDs {
			maxIDs = fetchAllTypes[i].numIDs
		}
	}

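	// Every mocked node reports itself healthy and bootstrapped.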
	healthCheckResult := &rpc.NodeHealthResult_{Ok: true, Status: "ok", Bootstrapped: true}

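	// Encode two hours of random datapoints at 30s resolution; the resulting
	// segment backs every element of the canned FetchBatchRaw response.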
	start := xtime.Now().Truncate(time.Hour)
	end := start.Add(2 * time.Hour)

	encoder := m3tsz.NewEncoder(start, nil, true, nil)
	for at := start; at.Before(end); at = at.Add(30 * time.Second) {
		dp := ts.Datapoint{
			TimestampNanos: at,
			Value:          rand.Float64() * math.MaxFloat64, //nolint: gosec
		}
		encoder.Encode(dp, xtime.Second, nil)
	}
	seg := encoder.Discard()
	respSegments := []*rpc.Segments{{
		Merged: &rpc.Segment{Head: seg.Head.Bytes(), Tail: seg.Tail.Bytes()},
	}}
	respElements := make([]*rpc.FetchRawResult_, maxIDs)
	for i := range respElements {
		respElements[i] = &rpc.FetchRawResult_{Segments: respSegments}
	}
	respResult := &rpc.FetchBatchRawResult_{Elements: respElements}

	// Override the new connection function for connection pools
	// to be able to mock the entire end-to-end pipeline.
	newConnFn := func(
		_ string, addr string, _ Options,
	) (Channel, rpc.TChanNode, error) {
		mockClient := rpc.NewMockTChanNode(ctrl)
		mockClient.EXPECT().Health(gomock.Any()).
			Return(healthCheckResult, nil).
			AnyTimes()
		mockClient.EXPECT().FetchBatchRaw(gomock.Any(), gomock.Any()).
			Return(respResult, nil).
			AnyTimes()
		return &noopPooledChannel{}, mockClient, nil
	}
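	// Build the full shard set with every shard in the Available state.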
	shards := make([]shard.Shard, numShards)
	for i := range shards {
		shards[i] = shard.NewShard(uint32(i)).SetState(shard.Available)
	}

	shardSet, err := sharding.NewShardSet(shards, sharding.DefaultHashFn(numShards))
	require.NoError(t, err)

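	// Create the hosts, each addressed as "<host id>:9000".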
	hosts := make([]topology.Host, numHosts)
	for i := range hosts {
		id := testHostName(i)
		hosts[i] = topology.NewHost(id, fmt.Sprintf("%s:9000", id))
	}

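	// Assign each replica's shards round-robin across hosts, skipping any
	// host that already holds the shard.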
	shardAssignments := make([][]shard.Shard, numHosts)
	allShards := shardSet.All()
	host := 0
	for i := 0; i < numReplicas; i++ {
		for shard := 0; shard < numShards; shard++ {
			placed := false
			for !placed {
				unique := true
				for _, existing := range shardAssignments[host] {
					if existing.ID() == uint32(shard) {
						unique = false
						break
					}
				}
				if unique {
					placed = true
					shardAssignments[host] = append(shardAssignments[host], allShards[shard])
				}
				host++
				if host >= len(hosts) {
					host = 0
				}
			}
			if !placed {
				assert.Fail(t, "could not place shard")
			}
		}
	}

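	// Convert each host's assignment into a HostShardSet.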
	hostShardSets := make([]topology.HostShardSet, numHosts)
	for hostIdx, shards := range shardAssignments {
		shardsSubset, err := sharding.NewShardSet(shards, shardSet.HashFn())
		require.NoError(t, err)
		hostShardSets[hostIdx] = topology.NewHostShardSet(hosts[hostIdx], shardsSubset)
	}

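	// Session options: a small fetch batch size, the mocked connection
	// function, and a static topology initializer.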
	opts := newSessionTestOptions().
		SetFetchBatchSize(128).
		SetNewConnectionFn(newConnFn).
		SetTopologyInitializer(topology.NewStaticInitializer(
			topology.NewStaticOptions().
				SetReplicas(numReplicas).
				SetShardSet(shardSet).
				SetHostShardSets(sessionTestHostAndShards(shardSet))))

	s, err := newSession(opts)
	assert.NoError(t, err)
	session := s.(*session)

	require.NoError(t, session.Open())

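	// Start the concurrent fetchers; startWg gates them until every
	// goroutine has been spawned so they begin fetching together.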
	var wg, startWg sync.WaitGroup
	startWg.Add(1)
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func() {
			startWg.Wait()
			defer wg.Done()

			for j := 0; j < fetchAllEach; j++ {
				ids := fetchAllTypes[j%len(fetchAllTypes)].ids
				iters, err := session.FetchIDs(ident.StringID(testNamespaceName),
					ident.NewStringIDsSliceIterator(ids), start, end)
				if err != nil {
					panic(err)
				}
				iters.Close()
			}
		}()
	}

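	// Release the fetchers and wait for them to drain before closing the session.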
	startWg.Done()

	wg.Wait()

	require.NoError(t, session.Close())
}