github.com/m3db/m3@v1.5.0/src/dbnode/client/aggregate_results_accumulator_consistency_test.go

// Copyright (c) 2019 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package client

import (
	"fmt"
	"testing"

	"github.com/m3db/m3/src/cluster/shard"
	"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
	"github.com/m3db/m3/src/dbnode/topology"
	tu "github.com/m3db/m3/src/dbnode/topology/testutil"
	xtime "github.com/m3db/m3/src/x/time"
)

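// The tests below exercise the aggregate results accumulator by replaying
// scripted per-host responses through testFetchStateWorkflow (a test helper
// defined elsewhere in this package). Each step feeds either a successful
// aggregate response or an error from a named host, and asserts whether the
// accumulator should consider the fetch done at that point and, if so,
// whether it should report an error for the configured read consistency
// level. The package-level fixtures below are shared across these tests.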
var (
	testStartTime, testEndTime   xtime.UnixNano
	testAggregateSuccessResponse = rpc.AggregateQueryRawResult_{}
	errTestAggregate             = fmt.Errorf("random error")
)

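// TestAggregateResultsAccumulatorAnyResponseShouldTerminateConsistencyLevelOneSimpleTopo
// checks that with every host owning the full shard range, a single successful
// response from any host satisfies ReadConsistencyLevelOne, while errors alone
// only terminate the fetch (with an error) once all expected responses have
// been received.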
func TestAggregateResultsAccumulatorAnyResponseShouldTerminateConsistencyLevelOneSimpleTopo(t *testing.T) {
	// rf=3, 30 shards total; three identical hosts
	topoMap := tu.MustNewTopologyMap(3, map[string][]shard.Shard{
		"testhost0": tu.ShardsRange(0, 29, shard.Available),
		"testhost1": tu.ShardsRange(0, 29, shard.Available),
		"testhost2": tu.ShardsRange(0, 29, shard.Available),
	})

	// any response should satisfy consistency lvl one
	for i := 0; i < 3; i++ {
		testFetchStateWorkflow{
			t:       t,
			topoMap: topoMap,
			level:   topology.ReadConsistencyLevelOne,
			steps: []testFetchStateWorklowStep{
				{
					hostname:        fmt.Sprintf("testhost%d", i),
					aggregateResult: &testAggregateSuccessResponse,
					expectedDone:    true,
				},
			},
		}.run()
	}

	// should terminate only after all failures, and say it failed
	testFetchStateWorkflow{
		t:       t,
		topoMap: topoMap,
		level:   topology.ReadConsistencyLevelOne,
		steps: []testFetchStateWorklowStep{
			{
				hostname:     "testhost0",
				aggregateErr: errTestAggregate,
			},
			{
				hostname:     "testhost1",
				aggregateErr: errTestAggregate,
			},
			{
				hostname:     "testhost2",
				aggregateErr: errTestAggregate,
				expectedDone: true,
				expectedErr:  true,
			},
		},
	}.run()
}

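// TestAggregateResultsAccumulatorShardAvailabilityIsEnforced verifies that
// successful responses from a host whose shards are still initializing
// (testhost1) do not count towards read consistency: even when every host
// responds successfully, ReadConsistencyLevelAll still reports an error.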
func TestAggregateResultsAccumulatorShardAvailabilityIsEnforced(t *testing.T) {
	// rf=3, 30 shards total; three hosts, with testhost1's shards still initializing
	topoMap := tu.MustNewTopologyMap(3, map[string][]shard.Shard{
		"testhost0": tu.ShardsRange(0, 29, shard.Available),
		"testhost1": tu.ShardsRange(0, 29, shard.Initializing),
		"testhost2": tu.ShardsRange(0, 29, shard.Available),
	})

	// responses from testhost1 should not count towards success
	// for consistency level 1
	testFetchStateWorkflow{
		t:       t,
		topoMap: topoMap,
		level:   topology.ReadConsistencyLevelOne,
		steps: []testFetchStateWorklowStep{
			{
				hostname:        "testhost1",
				aggregateResult: &testAggregateSuccessResponse,
				expectedDone:    false,
			},
		},
	}.run()

	// for consistency level unstrict majority
	testFetchStateWorkflow{
		t:       t,
		topoMap: topoMap,
		level:   topology.ReadConsistencyLevelUnstrictMajority,
		steps: []testFetchStateWorklowStep{
			{
				hostname:        "testhost1",
				aggregateResult: &testAggregateSuccessResponse,
			},
			{
				hostname:     "testhost2",
				aggregateErr: errTestAggregate,
			},
			{
				hostname:     "testhost0",
				aggregateErr: errTestAggregate,
				expectedDone: true,
				expectedErr:  true,
			},
		},
	}.run()

	// for consistency level majority
	testFetchStateWorkflow{
		t:       t,
		topoMap: topoMap,
		level:   topology.ReadConsistencyLevelMajority,
		steps: []testFetchStateWorklowStep{
			{
				hostname:        "testhost1",
				aggregateResult: &testAggregateSuccessResponse,
			},
			{
				hostname:        "testhost2",
				aggregateResult: &testAggregateSuccessResponse,
			},
			{
				hostname:     "testhost0",
				aggregateErr: errTestAggregate,
				expectedDone: true,
				expectedErr:  true,
			},
		},
	}.run()

	// for consistency level unstrict all
	testFetchStateWorkflow{
		t:       t,
		topoMap: topoMap,
		level:   topology.ReadConsistencyLevelUnstrictAll,
		steps: []testFetchStateWorklowStep{
			{
				hostname:        "testhost1",
				aggregateResult: &testAggregateSuccessResponse,
			},
			{
				hostname:        "testhost2",
				aggregateResult: &testAggregateSuccessResponse,
			},
			{
				hostname:     "testhost0",
				aggregateErr: errTestAggregate,
				expectedDone: true,
				expectedErr:  false,
			},
		},
	}.run()
	testFetchStateWorkflow{
		t:       t,
		topoMap: topoMap,
		level:   topology.ReadConsistencyLevelUnstrictAll,
		steps: []testFetchStateWorklowStep{
			{
				hostname:     "testhost1",
				aggregateErr: errTestAggregate,
			},
			{
				hostname:        "testhost2",
				aggregateResult: &testAggregateSuccessResponse,
			},
			{
				hostname:     "testhost0",
				aggregateErr: errTestAggregate,
				expectedDone: true,
				expectedErr:  false,
			},
		},
	}.run()
	testFetchStateWorkflow{
		t:       t,
		topoMap: topoMap,
		level:   topology.ReadConsistencyLevelUnstrictAll,
		steps: []testFetchStateWorklowStep{
			{
				hostname:     "testhost1",
				aggregateErr: errTestAggregate,
			},
			{
				hostname:     "testhost2",
				aggregateErr: errTestAggregate,
			},
			{
				hostname:     "testhost0",
				aggregateErr: errTestAggregate,
				expectedDone: true,
				expectedErr:  true,
			},
		},
	}.run()

	// for consistency level all: testhost1's successful response does not
	// count while its shards are initializing, so the fetch still ends in an error
	testFetchStateWorkflow{
		t:       t,
		topoMap: topoMap,
		level:   topology.ReadConsistencyLevelAll,
		steps: []testFetchStateWorklowStep{
			{
				hostname:        "testhost1",
				aggregateResult: &testAggregateSuccessResponse,
			},
			{
				hostname:        "testhost2",
				aggregateResult: &testAggregateSuccessResponse,
			},
			{
				hostname:        "testhost0",
				aggregateResult: &testAggregateSuccessResponse,
				expectedDone:    true,
				expectedErr:     true,
			},
		},
	}.run()
}

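// TestAggregateResultsAccumulatorAnyResponseShouldTerminateConsistencyLevelOneComplexTopo
// checks that a success from a host owning only a subset of the shards cannot
// by itself satisfy ReadConsistencyLevelOne, since the remaining shards have
// no response yet.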
func TestAggregateResultsAccumulatorAnyResponseShouldTerminateConsistencyLevelOneComplexTopo(t *testing.T) {
	// rf=3, 30 shards total; 2 identical hosts, one additional host with a subset of all shards
	topoMap := tu.MustNewTopologyMap(3, map[string][]shard.Shard{
		"testhost0": tu.ShardsRange(0, 29, shard.Available),
		"testhost1": tu.ShardsRange(0, 29, shard.Available),
		"testhost2": tu.ShardsRange(10, 20, shard.Available),
	})

	// a single response from a host with partial shards isn't enough
	testFetchStateWorkflow{
		t:       t,
		topoMap: topoMap,
		level:   topology.ReadConsistencyLevelOne,
		steps: []testFetchStateWorklowStep{
			{
				hostname:        "testhost2",
				aggregateResult: &testAggregateSuccessResponse,
				expectedDone:    false,
			},
		},
	}.run()
}

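// TestAggregateResultsAccumulatorConsistencyUnstrictMajority checks unstrict
// majority against three identical hosts: two successes are enough to finish
// early, a single success still passes once all responses are in, and the
// fetch only fails after every response has come back as an error.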
func TestAggregateResultsAccumulatorConsistencyUnstrictMajority(t *testing.T) {
	// rf=3, 30 shards total; three identical hosts
	topoMap := tu.MustNewTopologyMap(3, map[string][]shard.Shard{
		"testhost0": tu.ShardsRange(0, 29, shard.Available),
		"testhost1": tu.ShardsRange(0, 29, shard.Available),
		"testhost2": tu.ShardsRange(0, 29, shard.Available),
	})

	// two success responses should succeed immediately
	testFetchStateWorkflow{
		t:       t,
		topoMap: topoMap,
		level:   topology.ReadConsistencyLevelUnstrictMajority,
		steps: []testFetchStateWorklowStep{
			{
				hostname:        "testhost0",
				aggregateResult: &testAggregateSuccessResponse,
				expectedDone:    false,
			},
			{
				hostname:        "testhost1",
				aggregateResult: &testAggregateSuccessResponse,
				expectedDone:    true,
			},
		},
	}.run()

	// two failures and one success response should succeed
	testFetchStateWorkflow{
		t:       t,
		topoMap: topoMap,
		level:   topology.ReadConsistencyLevelUnstrictMajority,
		steps: []testFetchStateWorklowStep{
			{
				hostname:     "testhost0",
				aggregateErr: errTestAggregate,
			},
			{
				hostname:     "testhost1",
				aggregateErr: errTestAggregate,
			},
			{
				hostname:        "testhost2",
				aggregateResult: &testAggregateSuccessResponse,
				expectedDone:    true,
			},
		},
	}.run()

	// should terminate only after all failures
	testFetchStateWorkflow{
		t:       t,
		topoMap: topoMap,
		level:   topology.ReadConsistencyLevelUnstrictMajority,
		steps: []testFetchStateWorklowStep{
			{
				hostname:     "testhost0",
				aggregateErr: errTestAggregate,
			},
			{
				hostname:     "testhost1",
				aggregateErr: errTestAggregate,
			},
			{
				hostname:     "testhost2",
				aggregateErr: errTestAggregate,
				expectedErr:  true,
				expectedDone: true,
			},
		},
	}.run()
}

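// TestAggregateResultsAccumulatorConsistencyUnstrictMajorityComplexTopo checks
// unstrict majority against a topology where only testhost1 and testhost2 have
// available shards: a single success from an available host is enough for the
// fetch to complete without error once all four hosts have responded.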
func TestAggregateResultsAccumulatorConsistencyUnstrictMajorityComplexTopo(t *testing.T) {
	// rf=3, 30 shards total; four hosts covering the full shard range in different states
	topoMap := tu.MustNewTopologyMap(3, map[string][]shard.Shard{
		"testhost0": tu.ShardsRange(0, 29, shard.Initializing),
		"testhost1": tu.ShardsRange(0, 29, shard.Available),
		"testhost2": tu.ShardsRange(0, 29, shard.Available),
		"testhost3": tu.ShardsRange(0, 29, shard.Leaving),
	})

	// a single success response should succeed (only testhost1 and testhost2 have available shards)
	testFetchStateWorkflow{
		t:       t,
		topoMap: topoMap,
		level:   topology.ReadConsistencyLevelUnstrictMajority,
		steps: []testFetchStateWorklowStep{
			{
				hostname:        "testhost0",
				aggregateResult: &testAggregateSuccessResponse,
			},
			{
				hostname:        "testhost1",
				aggregateResult: &testAggregateSuccessResponse,
			},
			{
				hostname:     "testhost2",
				aggregateErr: errTestAggregate,
			},
			{
				hostname:        "testhost3",
				aggregateResult: &testAggregateSuccessResponse,
				expectedDone:    true,
			},
		},
	}.run()
}

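// TestAggregateResultsAccumulatorComplexTopoUnstrictMajorityPartialResponses
// checks that successful responses from two hosts that each own only part of
// the shard range still satisfy unstrict majority when, taken together, they
// cover every shard.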
func TestAggregateResultsAccumulatorComplexTopoUnstrictMajorityPartialResponses(t *testing.T) {
	// rf=3, 30 shards total; 2 identical "complete hosts", 2 additional hosts which together comprise a "complete" host.
	topoMap := tu.MustNewTopologyMap(3, map[string][]shard.Shard{
		"testhost0": tu.ShardsRange(0, 29, shard.Available),
		"testhost1": tu.ShardsRange(0, 29, shard.Available),
		"testhost2": tu.ShardsRange(15, 29, shard.Available),
		"testhost3": tu.ShardsRange(0, 14, shard.Available),
	})

	// response from testhost2+testhost3 should be sufficient
	testFetchStateWorkflow{
		t:       t,
		topoMap: topoMap,
		level:   topology.ReadConsistencyLevelUnstrictMajority,
		steps: []testFetchStateWorklowStep{
			{
				hostname:        "testhost2",
				aggregateResult: &testAggregateSuccessResponse,
			},
			{
				hostname:        "testhost3",
				aggregateResult: &testAggregateSuccessResponse,
			},
			{
				hostname:     "testhost1",
				aggregateErr: errTestAggregate,
			},
			{
				hostname:     "testhost0",
				aggregateErr: errTestAggregate,
				expectedDone: true,
			},
		},
	}.run()
}

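// TestAggregateResultsAccumulatorComplexIncompleteTopoUnstrictMajorityPartialResponses
// is the counterpart to the test above: when the partial hosts leave shards 28
// and 29 uncovered, their successes are not enough, and the fetch fails once
// the two complete hosts also error out.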
func TestAggregateResultsAccumulatorComplexIncompleteTopoUnstrictMajorityPartialResponses(t *testing.T) {
	// rf=3, 30 shards total; 2 identical "complete hosts", 2 additional hosts which do not comprise a complete host.
	topoMap := tu.MustNewTopologyMap(3, map[string][]shard.Shard{
		"testhost0": tu.ShardsRange(0, 29, shard.Available),
		"testhost1": tu.ShardsRange(0, 29, shard.Available),
		"testhost2": tu.ShardsRange(15, 27, shard.Available),
		"testhost3": tu.ShardsRange(0, 14, shard.Available),
	})

	// responses from testhost2+testhost3 should be insufficient, as together they do not cover all shards
	testFetchStateWorkflow{
		t:       t,
		topoMap: topoMap,
		level:   topology.ReadConsistencyLevelUnstrictMajority,
		steps: []testFetchStateWorklowStep{
			{
				hostname:        "testhost2",
				aggregateResult: &testAggregateSuccessResponse,
			},
			{
				hostname:        "testhost3",
				aggregateResult: &testAggregateSuccessResponse,
			},
			{
				hostname:     "testhost1",
				aggregateErr: errTestAggregate,
			},
			{
				hostname:     "testhost0",
				aggregateErr: errTestAggregate,
				expectedDone: true,
				expectedErr:  true,
			},
		},
	}.run()
}

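// TestAggregateResultsAccumulatorReadConsistencyLevelMajority checks that with
// rf=3 a single success is never enough for ReadConsistencyLevelMajority, and
// that two failed responses fail the fetch regardless of the remaining
// response.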
func TestAggregateResultsAccumulatorReadConsistencyLevelMajority(t *testing.T) {
	// rf=3, 30 shards total; three identical hosts
	topoMap := tu.MustNewTopologyMap(3, map[string][]shard.Shard{
		"testhost0": tu.ShardsRange(0, 29, shard.Available),
		"testhost1": tu.ShardsRange(0, 29, shard.Available),
		"testhost2": tu.ShardsRange(0, 29, shard.Available),
	})

	// any single success response should not satisfy consistency majority
	for i := 0; i < 3; i++ {
		testFetchStateWorkflow{
			t:       t,
			topoMap: topoMap,
			level:   topology.ReadConsistencyLevelMajority,
			steps: []testFetchStateWorklowStep{
				{
					hostname:        fmt.Sprintf("testhost%d", i),
					aggregateResult: &testAggregateSuccessResponse,
					expectedDone:    false,
				},
			},
		}.run()
	}

	// all responses failing should fail consistency lvl majority
	testFetchStateWorkflow{
		t:       t,
		topoMap: topoMap,
		level:   topology.ReadConsistencyLevelMajority,
		steps: []testFetchStateWorklowStep{
			{
				hostname:     "testhost0",
				aggregateErr: errTestAggregate,
			},
			{
				hostname:     "testhost1",
				aggregateErr: errTestAggregate,
			},
			{
				hostname:     "testhost2",
				aggregateErr: errTestAggregate,
				expectedDone: true,
				expectedErr:  true,
			},
		},
	}.run()

	// any two responses failing should fail regardless of the third response
	testFetchStateWorkflow{
		t:       t,
		topoMap: topoMap,
		level:   topology.ReadConsistencyLevelMajority,
		steps: []testFetchStateWorklowStep{
			{
				hostname:     "testhost0",
				aggregateErr: errTestAggregate,
			},
			{
				hostname:        "testhost1",
				aggregateResult: &testAggregateSuccessResponse,
			},
			{
				hostname:     "testhost2",
				aggregateErr: errTestAggregate,
				expectedDone: true,
				expectedErr:  true,
			},
		},
	}.run()
}