github.com/m3db/m3@v1.5.0/src/dbnode/integration/peers_bootstrap_none_available_test.go

// +build integration

// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/m3db/m3/src/cluster/services"
	"github.com/m3db/m3/src/cluster/shard"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/retention"
	"github.com/m3db/m3/src/dbnode/sharding"
	"github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/uninitialized"
	"github.com/m3db/m3/src/dbnode/topology"
	"github.com/m3db/m3/src/dbnode/topology/testutil"
	xtest "github.com/m3db/m3/src/x/test"
)

// TestPeersBootstrapNoneAvailable makes sure that a cluster with the peer bootstrapper enabled
// can still come up if none of its nodes' peer shards are in the Available or Leaving state.
func TestPeersBootstrapNoneAvailable(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	// Test setups
	log := xtest.NewLogger(t)
	retentionOpts := retention.NewOptions().
		SetRetentionPeriod(20 * time.Hour).
		SetBlockSize(2 * time.Hour).
		SetBufferPast(10 * time.Minute).
		SetBufferFuture(2 * time.Minute)
	namesp, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions().SetRetentionOptions(retentionOpts))
	require.NoError(t, err)
	opts := NewTestOptions(t).
		SetNamespaces([]namespace.Metadata{namesp}).
		// Use TChannel clients for writing / reading because we want to target individual nodes at a time
		// and not write/read all nodes in the cluster.
		SetUseTChannelClientForWriting(true).
		SetUseTChannelClientForReading(true)

	minShard := uint32(0)
	maxShard := uint32(opts.NumShards()) - uint32(1)
	start := []services.ServiceInstance{
		node(t, 0, newClusterShardsRange(minShard, maxShard, shard.Initializing)),
		node(t, 1, newClusterShardsRange(minShard, maxShard, shard.Initializing)),
	}

	hostShardSets := []topology.HostShardSet{}
	for _, instance := range start {
		h, err := topology.NewHostShardSetFromServiceInstance(instance, sharding.DefaultHashFn(int(maxShard)))
		require.NoError(t, err)
		hostShardSets = append(hostShardSets, h)
	}

	shards := testutil.ShardsRange(minShard, maxShard, shard.Initializing)
	shardSet, err := sharding.NewShardSet(
		shards,
		sharding.DefaultHashFn(int(maxShard)),
	)
	require.NoError(t, err)

	topoOpts := topology.NewStaticOptions().
		SetReplicas(2).
		SetHostShardSets(hostShardSets).
		SetShardSet(shardSet)
	topoInit := topology.NewStaticInitializer(topoOpts)

	setupOpts := []BootstrappableTestSetupOptions{
		{
			DisablePeersBootstrapper: false,
			TopologyInitializer:      topoInit,
			FinalBootstrapper:        uninitialized.UninitializedTopologyBootstrapperName,
		},
		{
			DisablePeersBootstrapper: false,
			TopologyInitializer:      topoInit,
			FinalBootstrapper:        uninitialized.UninitializedTopologyBootstrapperName,
		},
	}
	setups, closeFn := NewDefaultBootstrappableTestSetups(t, opts, setupOpts)
	defer closeFn()

	setups.parallel(func(s TestSetup) {
		require.NoError(t, s.StartServer())
	})
	log.Debug("servers are now up")

	for i, s := range setups {
		assert.True(t, s.ServerIsBootstrapped(), "setups[%v] should be bootstrapped", i)
	}

	// Stop the servers
	defer func() {
		setups.parallel(func(s TestSetup) {
			require.NoError(t, s.StopServer())
		})
		log.Debug("servers are now down")
	}()
}
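
A minimal way to run this test, assuming a checkout of the m3 repository and execution from its root: the "// +build integration" constraint means the integration build tag must be supplied, and the testing.Short() guard means the -short flag would skip it.

    go test -tags integration -run TestPeersBootstrapNoneAvailable ./src/dbnode/integration/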