github.com/cranelv/ethereum_mpc@v0.0.0-20191031014521-23aeb1415092/consensus_pbft/pbft/batch_test.go

/*
Copyright IBM Corp. 2016 All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pbft

import (
	"fmt"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/consensus_pbft"
	"github.com/ethereum/go-ethereum/consensus_pbft/message"
	"github.com/ethereum/go-ethereum/consensus_pbft/params"
	"github.com/ethereum/go-ethereum/consensus_pbft/pbftTypes"
	"github.com/ethereum/go-ethereum/consensus_pbft/singletons"
	"github.com/ethereum/go-ethereum/consensus_pbft/util/events"
	"github.com/ethereum/go-ethereum/log"
)

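// getPBFTCore exposes the underlying pbftCore so tests can inspect and manipulate protocol state directly.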
func (op *obcBatch) getPBFTCore() *pbftCore {
	return op.pbft
}

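// obcBatchHelper adapts newObcBatch to the pbftConsumer factory signature expected by makeConsumerNetwork.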
func obcBatchHelper(id pbftTypes.ReplicaID, config *params.Config,
	stack consensus_pbft.Stack) pbftConsumer {
	// It's not entirely obvious why the compiler likes the parent function, but not newObcBatch directly
	return newObcBatch(id, config, stack)
}

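// TestNetworkBatch drives two transactions through a four-validator network with a batch size of two,
// then checks that the primary's batchStore has drained and that every replica committed a block
// containing both transactions.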
func TestNetworkBatch(t *testing.T) {
	log.InitLog(5)
	batchSize := 2
	validatorCount := uint32(4)
	net := makeConsumerNetwork(validatorCount, obcBatchHelper, func(ce *consumerEndpoint) {
		ce.consumer.(*obcBatch).batchSize = batchSize
	})
	defer net.stop()

	broadcaster := net.endpoints[generateBroadcaster(validatorCount)].getHandle()
	err := net.endpoints[1].(*consumerEndpoint).consumer.RecvMsg(createTxMsg(1), broadcaster)
	if err != nil {
		t.Errorf("External request was not processed by backup: %v", err)
	}
	err = net.endpoints[2].(*consumerEndpoint).consumer.RecvMsg(createTxMsg(2), broadcaster)
	if err != nil {
		t.Fatalf("External request was not processed by backup: %v", err)
	}

	net.process()
	net.process()

	if l := len(net.endpoints[0].(*consumerEndpoint).consumer.(*obcBatch).batchStore); l != 0 {
		t.Errorf("Expected an empty batchStore on the primary, found %v",
			net.endpoints[0].(*consumerEndpoint).consumer.(*obcBatch).batchStore)
	}

	for _, ep := range net.endpoints {
		ce := ep.(*consumerEndpoint)
		block, err := ce.consumer.(*obcBatch).stack.GetState(1)
		if nil != err {
			t.Fatalf("Replica %d executed requests and should have produced a new block, but it could not be retrieved: %s", ce.id, err)
		}
		numTrans := len(block.Payload.(*message.Block).Tasks)
		if numTrans != batchSize {
			t.Fatalf("Replica %d executed %d requests, expected %d",
				ce.id, numTrans, batchSize)
		}
	}
}

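// inertState is an omniProto stub that reports a fixed genesis state and turns networking and
// state-transfer callbacks into no-ops; tests copy it and override only the hooks they need.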
var inertState = &omniProto{
	GetBlockchainInfoImpl: func() *message.StateInfo {
		return &message.StateInfo{
			Hash:   pbftTypes.MessageDigest("GENESIS"),
			Number: 1,
		}
	},
	GetBlockchainInfoBlobImpl: func() []byte {
		stateInfo := message.StateInfo{
			Hash:   pbftTypes.MessageDigest("GENESIS"),
			Number: 1,
		}
		b, _ := singletons.Marshaler.Marshal(stateInfo)
		return b
	},
	GetNetworkInfoImpl: func() (self pbftTypes.Peer, network []pbftTypes.Peer, err error) {
		return nil, nil, nil
	},
	GetNetworkHandlesImpl: func() (self *pbftTypes.PeerID, network []*pbftTypes.PeerID, err error) {
		return nil, nil, nil
	},
	InvalidateStateImpl: func() {},
	ValidateStateImpl:   func() {},
	UpdateStateImpl:     func(id interface{}, target *message.StateInfo, peers []*pbftTypes.PeerID) {},
}

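// TestClearOutstandingReqsOnStateRecovery verifies that any outstanding requests are dropped
// once state transfer (a stateUpdatedEvent) completes.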
func TestClearOutstandingReqsOnStateRecovery(t *testing.T) {
	log.InitLog(5)
	omni := *inertState
	omni.UnicastImpl = func(msg *message.Message, receiverHandle *pbftTypes.PeerID) error { return nil }
	b := newObcBatch(0, loadConfig(), &omni)
	b.StateUpdated(&checkpointMessage{seqNo: 0, snapshotId: inertState.GetBlockchainInfoBlobImpl()}, inertState.GetBlockchainInfoImpl())

	defer b.Close()

	b.reqStore.storeOutstanding(&message.Request{})

	b.manager.Queue() <- stateUpdatedEvent{
		chkpt: &checkpointMessage{
			seqNo: 10,
		},
	}

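	// Queueing nil acts as a barrier: once it has been consumed, the stateUpdatedEvent above is fully processed.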
	b.manager.Queue() <- nil

	if b.reqStore.outstandingRequests.Len() != 0 {
		t.Fatalf("Should not have any requests outstanding after completing state transfer")
	}
}

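// TestOutstandingReqsIngestion checks that a request received by vp1 is relayed (via vp1's
// unicast hook) so that every replica ends up tracking it as outstanding.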
func TestOutstandingReqsIngestion(t *testing.T) {
	log.InitLog(5)
	bs := [3]*obcBatch{}
	peerId := stringToPeerId("vp1")
	for i := range bs {
		omni := *inertState
		omni.UnicastImpl = func(ocMsg *message.Message, peer *pbftTypes.PeerID) error { return nil }
		bs[i] = newObcBatch(pbftTypes.ReplicaID(i), loadConfig(), &omni)
		defer bs[i].Close()

		// Have only vp1 actually deliver its messages, to vp0 and vp2
		if i == 1 {
			omni.UnicastImpl = func(ocMsg *message.Message, peer *pbftTypes.PeerID) error {
				dest, _ := omni.GetValidatorID(peer)
				if dest == 0 || dest == 2 {
					bs[dest].RecvMsg(ocMsg, &peerId)
				}
				return nil
			}
		}
	}
	for i := range bs {
		bs[i].StateUpdated(&checkpointMessage{seqNo: 0, snapshotId: inertState.GetBlockchainInfoBlobImpl()}, inertState.GetBlockchainInfoImpl())
	}

	err := bs[1].RecvMsg(createTxMsg(1), &peerId)
	if err != nil {
		t.Fatalf("External request was not processed by backup: %v", err)
	}

	for _, b := range bs {
		b.manager.Queue() <- nil
		b.broadcaster.Wait()
		b.manager.Queue() <- nil
	}

	for i, b := range bs {
		b.manager.Queue() <- nil
		count := b.reqStore.outstandingRequests.Len()
		if count != 1 {
			t.Errorf("Batch backup %d should have exactly one request in its store, found %d", i, count)
		}
	}
}

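// TestOutstandingReqsResubmission exercises resubmission: outstanding requests must be re-proposed
// after a view change, while a request already pre-prepared in the qSet must not be broadcast again.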
func TestOutstandingReqsResubmission(t *testing.T) {
	log.InitLog(5)
	config := loadConfig()
	config.BatchSize = 2
	omni := *inertState
	b := newObcBatch(0, config, &omni)
	defer b.Close() // The broadcasting threads only cause problems here... but this test stalls without them

	transactionsBroadcast := 0
	omni.ExecuteImpl = func(tag interface{}, tasks []*message.Task) {
		transactionsBroadcast += len(tasks)
		fmt.Printf("\nExecuting %d transactions (%v)\n", len(tasks), tasks)
		nextExec := b.pbft.lastExec + 1
		b.pbft.currentExec = &nextExec
		b.manager.Inject(executedEvent{tag: tag})
	}

	omni.CommitImpl = func(tag interface{}, meta []byte) {
		b.manager.Inject(committedEvent{})
	}

	omni.UnicastImpl = func(ocMsg *message.Message, dest *pbftTypes.PeerID) error {
		return nil
	}

	b.StateUpdated(&checkpointMessage{seqNo: 0, snapshotId: inertState.GetBlockchainInfoBlobImpl()}, inertState.GetBlockchainInfoImpl())
	b.manager.Queue() <- nil // Make sure the state update finishes first

	reqs := make([]*message.Request, 8)
	for i := 0; i < len(reqs); i++ {
		reqs[i] = createPbftReq(int64(i), 0)
	}

	// Add four requests, with a batch size of 2
	b.reqStore.storeOutstanding(reqs[0])
	b.reqStore.storeOutstanding(reqs[1])
	b.reqStore.storeOutstanding(reqs[2])
	b.reqStore.storeOutstanding(reqs[3])

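	// execute runs every not-yet-executed outstanding request batch exactly once, tracking batches by digest.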
	executed := make(map[pbftTypes.MessageDigest]struct{})
	execute := func() {
		for d, reqBatch := range b.pbft.outstandingReqBatches {
			if _, ok := executed[d]; ok {
				continue
			}
			executed[d] = struct{}{}
			b.execute(b.pbft.lastExec+1, reqBatch)
		}
	}

	tmp := uint64(1)
	b.pbft.currentExec = &tmp
	events.SendEvent(b, committedEvent{})
	execute()

	if b.reqStore.outstandingRequests.Len() != 0 {
		t.Fatalf("All request batches should have been executed and deleted after exec")
	}

	// Simulate changing views, with one request in the qSet and other outstanding requests that are not
	reqsBatch := &message.RequestBatch{reqs[4]}
	prePrep := &message.PrePrepare{
		View:           0,
		SequenceNumber: b.pbft.lastExec + 1,
		BatchDigest:    "foo",
		RequestBatch:   reqsBatch,
	}

	b.pbft.certStore[msgID{v: prePrep.View, n: prePrep.SequenceNumber}] = &msgCert{prePrepare: prePrep}

	// Re-add the already pre-prepared request as outstanding, along with three requests that are neither pending nor prepared
	b.reqStore.storeOutstanding(reqs[4]) // the request already in the qSet
	b.reqStore.storeOutstanding(reqs[5])
	b.reqStore.storeOutstanding(reqs[6])
	b.reqStore.storeOutstanding(reqs[7])

	events.SendEvent(b, viewChangedEvent{})
	execute()

	if b.reqStore.hasNonPending() {
		t.Errorf("All requests should have been resubmitted after view change")
	}

	// We should have one request left in the current batch which has not been sent yet
	expected := 6
	if transactionsBroadcast != expected {
		t.Errorf("Expected %d transactions broadcast, got %d", expected, transactionsBroadcast)
	}

	events.SendEvent(b, batchTimerEvent{})
	execute()

	// If the already pre-prepared request were resubmitted as well, the count here would be 8
	expected = 7
	if transactionsBroadcast != expected {
		t.Errorf("Expected %d transactions broadcast, got %d", expected, transactionsBroadcast)
	}
}

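// TestViewChangeOnPrimarySilence verifies that a backup whose forwarded request is ignored by a
// silent primary times out and initiates a view change.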
func TestViewChangeOnPrimarySilence(t *testing.T) {
	log.InitLog(5)
	omni := *inertState
	omni.UnicastImpl = func(ocMsg *message.Message, peer *pbftTypes.PeerID) error { return nil } // For the checkpoint
	omni.SignImpl = func(msg []byte) ([]byte, error) { return msg, nil }
	omni.VerifyImpl = func(peerID pbftTypes.ReplicaID, signature []byte, message []byte) error { return nil }
	b := newObcBatch(1, loadConfig(), &omni)
	b.StateUpdated(&checkpointMessage{seqNo: 0, snapshotId: inertState.GetBlockchainInfoBlobImpl()}, inertState.GetBlockchainInfoImpl())
	b.pbft.requestTimeout = 50 * time.Millisecond
	defer b.Close()

	// Send a request, which will be ignored by the silent primary, triggering a view change
	peerId := stringToPeerId("vp0")
	b.manager.Queue() <- batchMessageEvent{createTxMsg(1), &peerId}
	time.Sleep(time.Second)
	b.manager.Queue() <- nil

	if b.pbft.activeView {
		t.Fatalf("Should have caused a view change")
	}
}

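// obcBatchSizeOneHelper is obcBatchHelper with the batch size forced to 1, so each request produces its own batch.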
func obcBatchSizeOneHelper(id pbftTypes.ReplicaID, config *params.Config, stack consensus_pbft.Stack) pbftConsumer {
	// It's not entirely obvious why the compiler likes the parent function, but not newObcBatch directly
	config.BatchSize = 1
	return newObcBatch(id, config, stack)
}

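// TestClassicStateTransfer cuts Replica 3 off while the network advances, then reconnects it and
// verifies it catches up via state transfer and executes through seqNo 9 while staying in view 0.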
func TestClassicStateTransfer(t *testing.T) {
	log.InitLog(5)
	validatorCount := uint32(4)
	net := makeConsumerNetwork(validatorCount, obcBatchSizeOneHelper, func(ce *consumerEndpoint) {
		ce.consumer.(*obcBatch).pbft.K = 2
		ce.consumer.(*obcBatch).pbft.L = 4
	})
	defer net.stop()
	// net.debug = true

	filterMsg := true
	net.filterFn = func(src int, dst int, msg []byte) []byte {
		if filterMsg && dst == 3 { // 3 is byz
			return nil
		}
		return msg
	}

	// Advance the network by one seqNo while Replica 3 is cut off, so that it will have to do state transfer
	broadcaster := net.endpoints[generateBroadcaster(validatorCount)].getHandle()
	net.endpoints[1].(*consumerEndpoint).consumer.RecvMsg(createTxMsg(1), broadcaster)
	net.process()

	// Move the seqNo to 9; at seqNo 6 Replica 3 will realize it's behind, transfer to seqNo 8, then execute seqNo 9
	filterMsg = false
	for n := 2; n <= 9; n++ {
		net.endpoints[1].(*consumerEndpoint).consumer.RecvMsg(createTxMsg(int64(n)), broadcaster)
	}

	net.process()

	for _, ep := range net.endpoints {
		ce := ep.(*consumerEndpoint)
		obc := ce.consumer.(*obcBatch)
		_, err := obc.stack.GetState(9)
		if nil != err {
			t.Errorf("Replica %d executed requests and should have produced a new block, but it could not be retrieved: %s", ce.id, err)
		}
		if !obc.pbft.activeView || obc.pbft.view != 0 {
			t.Errorf("Replica %d not active in view 0, is %v %d", ce.id, obc.pbft.activeView, obc.pbft.view)
		}
	}
}

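// TestClassicBackToBackStateTransfer forces Replica 3 through successive state-transfer targets
// while the network advances through seqNo 21, and verifies it still catches up in view 0.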
func TestClassicBackToBackStateTransfer(t *testing.T) {
	log.InitLog(5)
	validatorCount := uint32(4)
	net := makeConsumerNetwork(validatorCount, obcBatchSizeOneHelper, func(ce *consumerEndpoint) {
		ce.consumer.(*obcBatch).pbft.K = 2
		ce.consumer.(*obcBatch).pbft.L = 4
		ce.consumer.(*obcBatch).pbft.requestTimeout = time.Hour // We do not want any view changes
	})
	defer net.stop()
	// net.debug = true

	filterMsg := true
	net.filterFn = func(src int, dst int, msg []byte) []byte {
		if filterMsg && dst == 3 { // 3 is byz
			return nil
		}
		return msg
	}

	// Get the group to advance past seqNo 1, leaving Replica 3 behind
	broadcaster := net.endpoints[generateBroadcaster(validatorCount)].getHandle()
	net.endpoints[1].(*consumerEndpoint).consumer.RecvMsg(createTxMsg(1), broadcaster)
	net.process()

	// Now include Replica 3 again and advance the network. After seeing seqNo 8, Replica 3 will trigger
	// state transfer, receive further targets for seqNo 10 and 12, but transfer only to seqNo 8. By then
	// the network will have moved past seqNo 13, outside Replica 3's watermarks, yet Replica 3 will
	// still execute through seqNo 12.
	filterMsg = false
	for n := 2; n <= 21; n++ {
		net.endpoints[1].(*consumerEndpoint).consumer.RecvMsg(createTxMsg(int64(n)), broadcaster)
	}

	net.process()

	for _, ep := range net.endpoints {
		ce := ep.(*consumerEndpoint)
		obc := ce.consumer.(*obcBatch)
		_, err := obc.stack.GetState(21)
		if nil != err {
			t.Errorf("Replica %d executed requests and should have produced a new block, but it could not be retrieved: %s", ce.id, err)
		}
		if !obc.pbft.activeView || obc.pbft.view != 0 {
			t.Errorf("Replica %d not active in view 0, is %v %d", ce.id, obc.pbft.activeView, obc.pbft.view)
		}
	}
}

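// TestClearBatchStoreOnViewChange verifies that a pending (unsent) batch is discarded when the replica changes view.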
func TestClearBatchStoreOnViewChange(t *testing.T) {
	log.InitLog(5)
	omni := *inertState
	omni.UnicastImpl = func(ocMsg *message.Message, peer *pbftTypes.PeerID) error { return nil } // For the checkpoint
	b := newObcBatch(1, loadConfig(), &omni)
	b.StateUpdated(&checkpointMessage{seqNo: 0, snapshotId: inertState.GetBlockchainInfoBlobImpl()}, inertState.GetBlockchainInfoImpl())
	defer b.Close()

	b.batchStore = append(b.batchStore, &message.Request{})

	// Signal a view change, which should clear the pending batch
	b.manager.Queue() <- viewChangedEvent{}
	b.manager.Queue() <- nil

	if len(b.batchStore) != 0 {
		t.Fatalf("Should have cleared the batch store on view change")
	}
}