github.com/m3db/m3@v1.5.0/src/cluster/placement/service/mirrored_custom_groups_test.go (about)

     1  // Copyright (c) 2019 Uber Technologies, Inc.
     2  //
     3  // Permission is hereby granted, free of charge, to any person obtaining a copy
     4  // of this software and associated documentation files (the "Software"), to deal
     5  // in the Software without restriction, including without limitation the rights
     6  // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     7  // copies of the Software, and to permit persons to whom the Software is
     8  // furnished to do so, subject to the following conditions:
     9  //
    10  // The above copyright notice and this permission notice shall be included in
    11  // all copies or substantial portions of the Software.
    12  //
    13  // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    14  // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    15  // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    16  // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    17  // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    18  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    19  // THE SOFTWARE.
    20  
    21  package service
    22  
    23  import (
    24  	"fmt"
    25  	"math/rand"
    26  	"sync/atomic"
    27  	"testing"
    28  
    29  	"github.com/m3db/m3/src/cluster/kv"
    30  	"github.com/m3db/m3/src/cluster/kv/mem"
    31  	"github.com/m3db/m3/src/cluster/placement"
    32  	"github.com/m3db/m3/src/cluster/placement/selector"
    33  	placementstorage "github.com/m3db/m3/src/cluster/placement/storage"
    34  	"github.com/m3db/m3/src/cluster/shard"
    35  
    36  	"github.com/stretchr/testify/assert"
    37  	"github.com/stretchr/testify/require"
    38  	"go.uber.org/zap"
    39  )
    40  
// Shared placement parameters for every test in this file.
const (
	zone          = "zone1" // the valid zone configured on placement options
	defaultWeight = 1       // weight given to every generated test instance
	numShards     = 12      // number of shards in each built placement
	rf            = 2       // replica factor (mirrored placements need >= 2)
)
    47  
// fixtures: instance IDs used throughout the tests.
const (
	// format: <groupID>_<instanceID>
	instG1I1 = "g1_i1"
	instG1I2 = "g1_i2"
	instG1I3 = "g1_i3"
	instG1I4 = "g1_i4"

	instG2I1 = "g2_i1"
	instG2I2 = "g2_i2"

	instG3I1 = "g3_i1"
	instG3I2 = "g3_i2"
	instG3I3 = "g3_i3"
)
    63  
var (
	// testGroups maps instance ID -> group ID. Instances sharing a group are
	// expected to mirror each other (same shards and shard set ID).
	testGroups = map[string]string{
		instG1I2: "group1",
		instG1I1: "group1",

		// for replacement
		instG1I3: "group1",
		instG1I4: "group1",

		instG2I1: "group2",
		instG2I2: "group2",

		// additional instances
		instG3I1: "group3",
		instG3I2: "group3",
		instG3I3: "group3",
	}
)
    82  
var (
	// logger is a shared development-mode logger for this package's tests.
	// The construction error is deliberately ignored; presumably acceptable
	// for test-only logging — NOTE(review): confirm it is used by sibling files.
	logger, _ = zap.NewDevelopment()
)
    86  
// TestExplicitMirroredCustomGroupSelector contains functional tests starting at the placement.Service level.
func TestExplicitMirroredCustomGroupSelector(t *testing.T) {
	// mustBuildInitialPlacement builds the initial placement from tctx.Instances,
	// failing the test on error, and verifies the custom grouping is respected.
	mustBuildInitialPlacement := func(
		t *testing.T, tctx *mirroredCustomGroupSelectorTestContext) placement.Placement {
		p, err := tctx.Service.BuildInitialPlacement(tctx.Instances, numShards, rf)
		require.NoError(t, err)
		assertPlacementRespectsGroups(t, p, tctx.Groups)
		return p
	}

	t.Run("BuildInitialPlacement", func(t *testing.T) {
		tctx := mirroredCustomGroupSelectorSetup(t)

		mustBuildInitialPlacement(t, tctx)
	})

	t.Run("BuildInitialPlacement insufficient nodes", func(t *testing.T) {
		tctx := mirroredCustomGroupSelectorSetup(t)
		// A single instance cannot satisfy rf=2 mirroring; expect a validation error.
		_, err := tctx.Service.BuildInitialPlacement([]placement.Instance{
			newInstanceWithID(instG1I1),
		}, numShards, rf)
		assert.EqualError(t, err, "found 1 count of shard set id 1, expecting 2")
		// assertPlacementRespectsGroups(t, p, tctx.Groups)
	})

	t.Run("BuildInitialPlacement given too many nodes should use only RF", func(t *testing.T) {
		tctx := mirroredCustomGroupSelectorSetup(t)
		// Four instances from the same group; only rf of them should be selected.
		p, err := tctx.Service.BuildInitialPlacement([]placement.Instance{
			newInstanceWithID(instG1I1),
			newInstanceWithID(instG1I2),
			newInstanceWithID(instG1I3),
			newInstanceWithID(instG1I4),
		}, numShards, rf)
		require.NoError(t, err)
		assertPlacementRespectsGroups(t, p, tctx.Groups)
	})

	t.Run("AddInstances", func(t *testing.T) {
		tctx := mirroredCustomGroupSelectorSetup(t)
		mustBuildInitialPlacement(t, tctx)

		// Add a complete group (group3 pair); both should be accepted.
		toAdd := []placement.Instance{
			newInstanceWithID(instG3I1),
			newInstanceWithID(instG3I2),
		}

		p, addedInstances, err := tctx.Service.AddInstances(toAdd)
		require.NoError(t, err)
		assert.Equal(t, toAdd, addedInstances)
		assertPlacementRespectsGroups(t, p, tctx.Groups)
	})

	t.Run("AddInstances given too many nodes should use only RF", func(t *testing.T) {
		tctx := mirroredCustomGroupSelectorSetup(t)
		mustBuildInitialPlacement(t, tctx)

		// Offer three group3 instances; only rf of them should actually be added.
		toAdd := []placement.Instance{
			newInstanceWithID(instG3I1),
			newInstanceWithID(instG3I2),
			newInstanceWithID(instG3I3),
		}

		p, addedInstances, err := tctx.Service.AddInstances(toAdd)
		require.NoError(t, err)
		assert.Len(t, addedInstances, p.ReplicaFactor())
		assertPlacementRespectsGroups(t, p, tctx.Groups)
	})

	t.Run("ReplaceInstances", func(t *testing.T) {
		tctx := mirroredCustomGroupSelectorSetup(t)
		mustBuildInitialPlacement(t, tctx)

		// The replacement for a group1 instance must come from group1, even when
		// a candidate from another group is also offered.
		group1Instance := newInstanceWithID(instG1I3)
		candidates := []placement.Instance{
			group1Instance,
			newInstanceWithID(instG3I1),
		}
		p, usedInstances, err := tctx.Service.ReplaceInstances([]string{instG1I1}, candidates)
		require.NoError(t, err)
		require.Len(t, usedInstances, 1)
		assert.Equal(t, group1Instance.ID(), usedInstances[0].ID())
		assertPlacementRespectsGroups(t, p, tctx.Groups)
	})

	// N.B.: at time of this writing this technically doesn't need grouping, but check that
	// the grouping is respected anyhow.
	t.Run("RemoveInstances", func(t *testing.T) {
		tctx := mirroredCustomGroupSelectorSetup(t)
		mustBuildInitialPlacement(t, tctx)

		p, err := tctx.Service.RemoveInstances([]string{instG1I1, instG1I2})
		require.NoError(t, err)

		assertPlacementRespectsGroups(t, p, tctx.Groups)
	})
}
   183  
// mirroredCustomGroupSelectorTestContext bundles the collaborators needed to
// exercise a placement.Service configured with a mirrored custom-group selector.
type mirroredCustomGroupSelectorTestContext struct {
	KVStore   kv.Store                   // in-memory KV store backing Storage
	Opts      placement.Options          // mirrored options with Selector installed
	Storage   placement.Storage          // placement storage on top of KVStore
	Service   placement.Service          // the service under test
	Selector  placement.InstanceSelector // mirrored custom-group selector
	Instances []placement.Instance       // default instances for initial placements
	Groups    map[string]string          // instance ID -> group ID mapping
}
   193  
   194  func mirroredCustomGroupSelectorSetup(t *testing.T) *mirroredCustomGroupSelectorTestContext {
   195  	tctx := &mirroredCustomGroupSelectorTestContext{}
   196  
   197  	tctx.Instances = []placement.Instance{
   198  		newInstanceWithID(instG1I1),
   199  		newInstanceWithID(instG2I1),
   200  		newInstanceWithID(instG1I2),
   201  		newInstanceWithID(instG2I2),
   202  	}
   203  
   204  	tctx.Groups = testGroups
   205  
   206  	opts := placement.NewOptions().
   207  		SetValidZone(zone).
   208  		SetIsMirrored(true)
   209  
   210  	tctx.Selector = selector.NewMirroredCustomGroupSelector(
   211  		selector.NewMapInstanceGroupIDFunc(tctx.Groups),
   212  		opts,
   213  	)
   214  
   215  	tctx.Opts = opts.SetInstanceSelector(tctx.Selector)
   216  
   217  	tctx.KVStore = mem.NewStore()
   218  	tctx.Storage = placementstorage.NewPlacementStorage(tctx.KVStore, "placement", tctx.Opts)
   219  	tctx.Service = NewPlacementService(tctx.Storage, WithPlacementOptions(tctx.Opts))
   220  	return tctx
   221  }
   222  
// assertPlacementRespectsGroups checks that each group in the given group map has:
//   - the same shards assigned
//   - the same shardset ID
//
// It reports failures via t and returns false if any check failed.
//
// Note: shard comparison ignores SourceID, as this is expected to differ between instances in
// a group (new instances take shards from different replicas in an existing group)
func assertPlacementRespectsGroups(t *testing.T, p placement.Placement, groups map[string]string) bool {
	// check that the groups are respected.
	instancesByGroupID := make(map[string][]placement.Instance, len(groups))

	// Every instance in the placement must belong to a known group.
	for _, inst := range p.Instances() {
		groupID, ok := groups[inst.ID()]
		if !assert.True(t, ok, "instance %s has no group", inst.ID()) {
			return false
		}
		instancesByGroupID[groupID] = append(instancesByGroupID[groupID], inst)
	}

	rtn := true
	for groupID, groupInsts := range instancesByGroupID {
		// make sure all shards are the same.
		if !assert.True(t, len(groupInsts) >= 1, "groupID %s", groupID) {
			continue
		}
		// Compare every other member of the group against the first one.
		compareInst := groupInsts[0]
		for _, inst := range groupInsts[1:] {
			instIDs := []string{inst.ID(), compareInst.ID()}

			// Shard IDs must match exactly (SourceID intentionally excluded).
			rtn = rtn && assert.Equal(t,
				shardIDs(compareInst.Shards()), shardIDs(inst.Shards()),
				"%s (actual) != %s (expected) for instances %v",
				inst.Shards(), compareInst.Shards(),
				instIDs)
			// Shard set IDs must match as well.
			rtn = rtn && assert.Equal(t,
				compareInst.ShardSetID(), inst.ShardSetID(),
				"shard not equal for instances %v", instIDs)
		}
	}
	return rtn
}
   263  
   264  func shardIDs(ss shard.Shards) []uint32 {
   265  	ids := make([]uint32, 0, len(ss.All()))
   266  	for _, s := range ss.All() {
   267  		ids = append(ids, s.ID())
   268  	}
   269  	return ids
   270  }
   271  
// newTestInstance returns an empty instance with a process-unique generated ID.
func newTestInstance() placement.Instance {
	return newInstanceWithID(nextTestInstanceID())
}
   275  
   276  func newInstanceWithID(id string) placement.Instance {
   277  	return placement.NewEmptyInstance(
   278  		id,
   279  		nextTestIsolationGroup(),
   280  		zone,
   281  		fmt.Sprintf("localhost:%d", randPort()),
   282  		defaultWeight,
   283  	)
   284  }
   285  
   286  var curInstanceNo int64
   287  
   288  // Uses global state; factor into a factory object if you need guarantees about the specific results.
   289  func nextTestInstanceID() string {
   290  	myNumber := atomic.AddInt64(&curInstanceNo, 1)
   291  	return fmt.Sprintf("instance-%d", myNumber)
   292  }
   293  
   294  // completely random valid port, not necessarily open.
   295  func randPort() int {
   296  	return rand.Intn(1 << 16)
   297  }
   298  
   299  var curISOGroup int64
   300  
   301  // Uses global state; factor into a factory object if you need guarantees about the specific results.
   302  func nextTestIsolationGroup() string {
   303  	myGroup := atomic.AddInt64(&curISOGroup, 1)
   304  	return fmt.Sprintf("iso-group-%d", myGroup)
   305  }