github.com/hechain20/hechain@v0.0.0-20220316014945-b544036ba106/orderer/common/cluster/rpc_test.go

/*
Copyright hechain. 2017 All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package cluster_test

import (
	"context"
	"io"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/hechain20/hechain/common/flogging"
	"github.com/hechain20/hechain/common/metrics/disabled"
	"github.com/hechain20/hechain/orderer/common/cluster"
	"github.com/hechain20/hechain/orderer/common/cluster/mocks"
	"github.com/hyperledger/fabric-protos-go/common"
	"github.com/hyperledger/fabric-protos-go/orderer"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc"
)

func noopReport(_ error) {
}

func TestSendSubmitWithReport(t *testing.T) {
	t.Parallel()
	node1 := newTestNode(t)
	node2 := newTestNode(t)

	var receptionWaitGroup sync.WaitGroup
	receptionWaitGroup.Add(1)
	node2.handler.On("OnSubmit", testChannel, mock.Anything, mock.Anything).Return(nil).Run(func(args mock.Arguments) {
		receptionWaitGroup.Done()
	})

	defer node1.stop()
	defer node2.stop()

	config := []cluster.RemoteNode{node1.nodeInfo, node2.nodeInfo}
	node1.c.Configure(testChannel, config)
	node2.c.Configure(testChannel, config)

	node1RPC := &cluster.RPC{
		Logger:        flogging.MustGetLogger("test"),
		Timeout:       time.Hour,
		StreamsByType: cluster.NewStreamsByType(),
		Channel:       testChannel,
		Comm:          node1.c,
	}

	// Wait for connections to be established
	time.Sleep(time.Second * 5)

	err := node1RPC.SendSubmit(node2.nodeInfo.ID, &orderer.SubmitRequest{Channel: testChannel, Payload: &common.Envelope{Payload: []byte("1")}}, noopReport)
	require.NoError(t, err)
	receptionWaitGroup.Wait() // Wait for message to be received

	// Restart the node
	node2.stop()
	node2.resurrect()

	/*
	 * Allow node2 to restart completely.
	 * If the restart has not completed, the existing stream may still manage to send
	 * the next SubmitRequest, which would make the test case fail. Hence this delay
	 * is required.
	 */
	time.Sleep(time.Second * 5)

	var wg2 sync.WaitGroup
	wg2.Add(1)

	reportSubmitFailed := func(err error) {
		defer wg2.Done()
		require.EqualError(t, err, io.EOF.Error())
	}

	err = node1RPC.SendSubmit(node2.nodeInfo.ID, &orderer.SubmitRequest{Channel: testChannel, Payload: &common.Envelope{Payload: []byte("2")}}, reportSubmitFailed)
	require.NoError(t, err)

	wg2.Wait()

	// Ensure the stale stream is cleaned up and removed from the mapping
	require.Len(t, node1RPC.StreamsByType[cluster.SubmitOperation], 0)

	// Wait for the connection to be re-established
	time.Sleep(time.Second * 5)

	// Send again, this time it should be received
	receptionWaitGroup.Add(1)
	err = node1RPC.SendSubmit(node2.nodeInfo.ID, &orderer.SubmitRequest{Channel: testChannel, Payload: &common.Envelope{Payload: []byte("3")}}, noopReport)
	require.NoError(t, err)
	receptionWaitGroup.Wait()
}

func TestRPCChangeDestination(t *testing.T) {
	// We send a Submit() to 2 different nodes - 1 and 2.
	// The first invocation of Submit() establishes a stream with node 1
	// and the second establishes a stream with node 2.
	// We define a mock behavior for only a single invocation of Send() on each
	// of the streams (to node 1 and to node 2), so we verify that an invocation
	// of rpc.SendSubmit to node 2 doesn't send the message to node 1.
	comm := &mocks.Communicator{}

	client1 := &mocks.ClusterClient{}
	client2 := &mocks.ClusterClient{}

	metrics := cluster.NewMetrics(&disabled.Provider{})

	comm.On("Remote", "mychannel", uint64(1)).Return(&cluster.RemoteContext{
		SendBuffSize: 10,
		Metrics:      metrics,
		Logger:       flogging.MustGetLogger("test"),
		Client:       client1,
		ProbeConn:    func(_ *grpc.ClientConn) error { return nil },
	}, nil)
	comm.On("Remote", "mychannel", uint64(2)).Return(&cluster.RemoteContext{
		SendBuffSize: 10,
		Metrics:      metrics,
		Logger:       flogging.MustGetLogger("test"),
		Client:       client2,
		ProbeConn:    func(_ *grpc.ClientConn) error { return nil },
	}, nil)

	streamToNode1 := &mocks.StepClient{}
	streamToNode2 := &mocks.StepClient{}
	streamToNode1.On("Context", mock.Anything).Return(context.Background())
	streamToNode2.On("Context", mock.Anything).Return(context.Background())

	client1.On("Step", mock.Anything).Return(streamToNode1, nil).Once()
	client2.On("Step", mock.Anything).Return(streamToNode2, nil).Once()

	rpc := &cluster.RPC{
		Logger:        flogging.MustGetLogger("test"),
		Timeout:       time.Hour,
		StreamsByType: cluster.NewStreamsByType(),
		Channel:       "mychannel",
		Comm:          comm,
	}

	var sent sync.WaitGroup
	sent.Add(2)

	signalSent := func(_ mock.Arguments) {
		sent.Done()
	}
	streamToNode1.On("Send", mock.Anything).Return(nil).Run(signalSent).Once()
	streamToNode2.On("Send", mock.Anything).Return(nil).Run(signalSent).Once()
	streamToNode1.On("Recv").Return(nil, io.EOF)
	streamToNode2.On("Recv").Return(nil, io.EOF)

	rpc.SendSubmit(1, &orderer.SubmitRequest{Channel: "mychannel"}, noopReport)
	rpc.SendSubmit(2, &orderer.SubmitRequest{Channel: "mychannel"}, noopReport)

	sent.Wait()
	streamToNode1.AssertNumberOfCalls(t, "Send", 1)
	streamToNode2.AssertNumberOfCalls(t, "Send", 1)
}

// TestSend exercises SendSubmit and SendConsensus against a mocked remote,
// covering successful sends, Send() failures, Remote() lookup failures,
// and Step() stream-creation failures.
func TestSend(t *testing.T) {
	submitRequest := &orderer.SubmitRequest{Channel: "mychannel"}
	submitResponse := &orderer.StepResponse{
		Payload: &orderer.StepResponse_SubmitRes{
			SubmitRes: &orderer.SubmitResponse{Status: common.Status_SUCCESS},
		},
	}

	consensusRequest := &orderer.ConsensusRequest{
		Channel: "mychannel",
	}

	submitReq := wrapSubmitReq(submitRequest)

	consensusReq := &orderer.StepRequest{
		Payload: &orderer.StepRequest_ConsensusRequest{
			ConsensusRequest: consensusRequest,
		},
	}

	submit := func(rpc *cluster.RPC) error {
		err := rpc.SendSubmit(1, submitRequest, noopReport)
		return err
	}

	step := func(rpc *cluster.RPC) error {
		return rpc.SendConsensus(1, consensusRequest)
	}

	type testCase struct {
		name           string
		method         func(rpc *cluster.RPC) error
		sendReturns    error
		sendCalledWith *orderer.StepRequest
		receiveReturns []interface{}
		stepReturns    []interface{}
		remoteError    error
		expectedErr    string
	}

	l := &sync.Mutex{}
	var tst testCase

	sent := make(chan struct{})

	var sendCalls uint32

	stream := &mocks.StepClient{}
	stream.On("Context", mock.Anything).Return(context.Background())
	stream.On("Send", mock.Anything).Return(func(*orderer.StepRequest) error {
		l.Lock()
		defer l.Unlock()
		atomic.AddUint32(&sendCalls, 1)
		sent <- struct{}{}
		return tst.sendReturns
	})

	for _, tst := range []testCase{
		{
			name:           "Send and Receive submit succeed",
			method:         submit,
			sendReturns:    nil,
			stepReturns:    []interface{}{stream, nil},
			receiveReturns: []interface{}{submitResponse, nil},
			sendCalledWith: submitReq,
		},
		{
			name:           "Send step succeed",
			method:         step,
			sendReturns:    nil,
			stepReturns:    []interface{}{stream, nil},
			sendCalledWith: consensusReq,
		},
		{
			name:           "Send submit fails",
			method:         submit,
			sendReturns:    errors.New("oops"),
			stepReturns:    []interface{}{stream, nil},
			sendCalledWith: submitReq,
			expectedErr:    "stream is aborted",
		},
		{
			name:           "Send step fails",
			method:         step,
			sendReturns:    errors.New("oops"),
			stepReturns:    []interface{}{stream, nil},
			sendCalledWith: consensusReq,
			expectedErr:    "stream is aborted",
		},
		{
			name:        "Remote() fails",
			method:      submit,
			remoteError: errors.New("timed out"),
			stepReturns: []interface{}{stream, nil},
			expectedErr: "timed out",
		},
		{
			name:        "Submit fails with Send",
			method:      submit,
			stepReturns: []interface{}{nil, errors.New("deadline exceeded")},
			expectedErr: "deadline exceeded",
		},
	} {
		l.Lock()
		testCase := tst
		l.Unlock()

		t.Run(testCase.name, func(t *testing.T) {
			atomic.StoreUint32(&sendCalls, 0)
			isSend := testCase.receiveReturns == nil
			comm := &mocks.Communicator{}
			client := &mocks.ClusterClient{}
			client.On("Step", mock.Anything).Return(testCase.stepReturns...)
			rm := &cluster.RemoteContext{
				Metrics:      cluster.NewMetrics(&disabled.Provider{}),
				SendBuffSize: 1,
				Logger:       flogging.MustGetLogger("test"),
				ProbeConn:    func(_ *grpc.ClientConn) error { return nil },
				Client:       client,
			}
			defer rm.Abort()
			comm.On("Remote", "mychannel", uint64(1)).Return(rm, testCase.remoteError)

			rpc := &cluster.RPC{
				Logger:        flogging.MustGetLogger("test"),
				Timeout:       time.Hour,
				StreamsByType: cluster.NewStreamsByType(),
				Channel:       "mychannel",
				Comm:          comm,
			}

			err := testCase.method(rpc)
			if testCase.remoteError == nil && testCase.stepReturns[1] == nil {
				<-sent
			}

			if testCase.stepReturns[1] == nil && testCase.remoteError == nil {
				require.NoError(t, err)
			} else {
				require.EqualError(t, err, testCase.expectedErr)
			}

			if testCase.remoteError == nil && testCase.expectedErr == "" && isSend {
				stream.AssertCalled(t, "Send", testCase.sendCalledWith)
				// Ensure that if we succeeded, only 1 stream was created even though
				// 2 calls to Send() were made
				err := testCase.method(rpc)
				<-sent

				require.NoError(t, err)
				require.Equal(t, 2, int(atomic.LoadUint32(&sendCalls)))
				client.AssertNumberOfCalls(t, "Step", 1)
			}
		})
	}
}

func TestRPCGarbageCollection(t *testing.T) {
	// Scenario: Send a message to a remote node, and establish a stream
	// while doing it.
	// Afterwards - make that stream be aborted, and send a message to a different
	// remote node.
	// The first stream should be cleaned from the mapping.

	comm := &mocks.Communicator{}
	client := &mocks.ClusterClient{}
	stream := &mocks.StepClient{}

	remote := &cluster.RemoteContext{
		SendBuffSize: 10,
		Metrics:      cluster.NewMetrics(&disabled.Provider{}),
		Logger:       flogging.MustGetLogger("test"),
		Client:       client,
		ProbeConn:    func(_ *grpc.ClientConn) error { return nil },
	}

	var sent sync.WaitGroup

	defineMocks := func(destination uint64) {
		sent.Add(1)
		comm.On("Remote", "mychannel", destination).Return(remote, nil)
		stream.On("Context", mock.Anything).Return(context.Background())
		client.On("Step", mock.Anything).Return(stream, nil).Once()
		stream.On("Send", mock.Anything).Return(nil).Once().Run(func(_ mock.Arguments) {
			sent.Done()
		})
		stream.On("Recv").Return(nil, nil)
	}

	mapping := cluster.NewStreamsByType()

	rpc := &cluster.RPC{
		Logger:        flogging.MustGetLogger("test"),
		Timeout:       time.Hour,
		StreamsByType: mapping,
		Channel:       "mychannel",
		Comm:          comm,
	}

	defineMocks(1)

	rpc.SendSubmit(1, &orderer.SubmitRequest{Channel: "mychannel"}, noopReport)
	// Wait for the message to arrive
	sent.Wait()
	// Ensure the stream is initialized in the mapping
	require.Len(t, mapping[cluster.SubmitOperation], 1)
	require.Equal(t, uint64(1), mapping[cluster.SubmitOperation][1].ID)
	// And the underlying gRPC stream indeed had Send invoked on it.
	stream.AssertNumberOfCalls(t, "Send", 1)

	// Abort all streams we currently have that are associated to the remote.
	remote.Abort()

	// The stream still exists, as it is not cleaned yet.
	require.Len(t, mapping[cluster.SubmitOperation], 1)
	require.Equal(t, uint64(1), mapping[cluster.SubmitOperation][1].ID)

	// Prepare for the next transmission.
	defineMocks(2)

	// Send a message to a different node.
	rpc.SendSubmit(2, &orderer.SubmitRequest{Channel: "mychannel"}, noopReport)
	// The mapping should now be cleaned of the previous stream.
	require.Len(t, mapping[cluster.SubmitOperation], 1)
	require.Equal(t, uint64(2), mapping[cluster.SubmitOperation][2].ID)
}