//go:build test_harness
// +build test_harness

// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
21 22 package inprocess 23 24 import ( 25 "errors" 26 "testing" 27 28 "github.com/m3db/m3/src/aggregator/aggregator" 29 "github.com/m3db/m3/src/integration/resources" 30 "github.com/stretchr/testify/require" 31 ) 32 33 func TestNewCluster(t *testing.T) { 34 configs, err := NewClusterConfigsFromYAML(clusterDBNodeConfig, clusterCoordConfig, "") 35 require.NoError(t, err) 36 37 m3, err := NewCluster(configs, resources.ClusterOptions{ 38 DBNode: &resources.DBNodeClusterOptions{ 39 RF: 3, 40 NumInstances: 1, 41 NumShards: 4, 42 NumIsolationGroups: 3, 43 }, 44 }) 45 require.NoError(t, err) 46 require.NoError(t, m3.Nodes().WaitForHealthy()) 47 require.NoError(t, m3.Cleanup()) 48 } 49 50 func TestNewSingleNodeCluster(t *testing.T) { 51 configs, err := NewClusterConfigsFromYAML(clusterDBNodeConfig, clusterCoordConfig, "") 52 require.NoError(t, err) 53 54 m3, err := NewCluster(configs, resources.ClusterOptions{ 55 DBNode: resources.NewDBNodeClusterOptions(), 56 }) 57 require.NoError(t, err) 58 require.NoError(t, m3.Nodes().WaitForHealthy()) 59 require.NoError(t, m3.Cleanup()) 60 } 61 62 func TestNewClusterWithAgg(t *testing.T) { 63 configs, err := NewClusterConfigsFromYAML(clusterDBNodeConfig, aggregatorCoordConfig, defaultAggregatorConfig) 64 require.NoError(t, err) 65 66 aggClusterOpts := resources.NewAggregatorClusterOptions() 67 aggClusterOpts.NumInstances = 2 68 m3, err := NewCluster(configs, resources.ClusterOptions{ 69 DBNode: resources.NewDBNodeClusterOptions(), 70 Aggregator: aggClusterOpts, 71 }) 72 require.NoError(t, err) 73 require.NoError(t, m3.Nodes().WaitForHealthy()) 74 require.NoError(t, m3.Aggregators().WaitForHealthy()) 75 require.NoError(t, m3.Cleanup()) 76 } 77 78 func TestNewClusterWithMultiAggs(t *testing.T) { 79 t.Skip("disabled as check for leader does not succeed when run as part " + 80 "of the test suite. 
succeeds when run individually") 81 82 configs, err := NewClusterConfigsFromYAML(clusterDBNodeConfig, aggregatorCoordConfig, defaultAggregatorConfig) 83 require.NoError(t, err) 84 85 aggClusterOpts := &resources.AggregatorClusterOptions{ 86 RF: 2, 87 NumShards: 4, 88 NumInstances: 2, 89 NumIsolationGroups: 2, 90 } 91 m3, err := NewCluster(configs, resources.ClusterOptions{ 92 DBNode: resources.NewDBNodeClusterOptions(), 93 Aggregator: aggClusterOpts, 94 }) 95 require.NoError(t, err) 96 require.NoError(t, m3.Nodes().WaitForHealthy()) 97 require.NoError(t, m3.Aggregators().WaitForHealthy()) 98 99 // wait for a leader aggregator 100 require.NoError(t, resources.Retry(func() error { 101 for _, agg := range m3.Aggregators() { 102 status, err := agg.Status() 103 if err != nil { 104 return err 105 } 106 if status.FlushStatus.ElectionState == aggregator.LeaderState { 107 return nil 108 } 109 } 110 return errors.New("no leader") 111 })) 112 113 // the leader resigns 114 leaderIdx := -1 115 for i, agg := range m3.Aggregators() { 116 status, err := agg.Status() 117 require.NoError(t, err) 118 if status.FlushStatus.ElectionState == aggregator.LeaderState { 119 verifyAggStatus(t, m3.Aggregators()[i], aggregator.LeaderState) 120 verifyAggStatus(t, m3.Aggregators()[1-i], aggregator.FollowerState) 121 require.NoError(t, agg.Resign()) 122 leaderIdx = i 123 break 124 } 125 } 126 127 // wait for the other instance to lead 128 require.NoError(t, resources.Retry(func() error { 129 newStatus, err := m3.Aggregators()[leaderIdx].Status() 130 if err != nil { 131 return err 132 } 133 if newStatus.FlushStatus.ElectionState != aggregator.FollowerState { 134 return errors.New("follower state expected after resigning") 135 } 136 return nil 137 })) 138 verifyAggStatus(t, m3.Aggregators()[leaderIdx], aggregator.FollowerState) 139 verifyAggStatus(t, m3.Aggregators()[1-leaderIdx], aggregator.LeaderState) 140 141 require.NoError(t, m3.Cleanup()) 142 } 143 144 func verifyAggStatus(t *testing.T, agg 
resources.Aggregator, expectedState aggregator.ElectionState) { 145 status, err := agg.Status() 146 require.NoError(t, err) 147 require.Equal(t, expectedState, status.FlushStatus.ElectionState) 148 } 149 150 const clusterDBNodeConfig = ` 151 db: 152 writeNewSeriesAsync: false 153 ` 154 155 const clusterCoordConfig = ` 156 clusters: 157 - namespaces: 158 - namespace: default 159 type: unaggregated 160 retention: 1h 161 `