github.com/reapchain/go-reapchain@v0.2.15-0.20210609012950-9735c110c705/consensus/podc/core/core.go

// Copyright 2017 AMIS Technologies
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"bytes"
	"math/big"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/podc"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"

	// "github.com/ethereum/go-ethereum/metrics"
	goMetrics "github.com/rcrowley/go-metrics"
	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)

// New creates a PoDC consensus core
func New(backend podc.Backend, config *podc.Config) Engine {
	c := &core{
		config:             config,
		address:            backend.Address(),
		state:              StateAcceptRequest, // TODO: is it right that every node requests ExtraData from the Qmanager, or should only the front node request it?
		logger:             log.New("address", backend.Address()),
		backend:            backend,
		backlogs:           make(map[podc.Validator]*prque.Prque),
		backlogsMu:         new(sync.Mutex),
		pendingRequests:    prque.New(),
		pendingRequestsMu:  new(sync.Mutex),
		consensusTimestamp: time.Time{},

		// roundMeter:         metrics.NewMeter("consensus/podc/core/round"),
		// sequenceMeter:      metrics.NewMeter("consensus/podc/core/sequence"),
		// consensusTimer:     metrics.NewTimer("consensus/podc/core/consensus"),
		roundMeter:     goMetrics.NewRegisteredMeter("consensus/podc/core/round", nil),
		sequenceMeter:  goMetrics.NewRegisteredMeter("consensus/podc/core/sequence", nil),
		consensusTimer: goMetrics.NewRegisteredTimer("consensus/podc/core/consensus", nil),
		racingMu:       new(sync.Mutex),
	}
	c.validateFn = c.checkValidatorSignature
	return c
}

// ----------------------------------------------------------------------------

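// core drives the PoDC consensus state machine for a single node: it tracks
// the current round state, the validator set, pending requests and backlogs,
// the round-change timer, and metrics for rounds, sequences and consensus
// duration.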
type core struct {
	config  *podc.Config
	address common.Address
	state   State
	logger  log.Logger

	backend podc.Backend
	events  *event.TypeMuxSubscription

	//qmanager common.Address

	lastProposer common.Address

	lastProposal podc.Proposal
	lastSequence *big.Int // added by yichoi to resolve an inconsistency
	valSet       podc.ValidatorSet
	//voteSet      podc.ValidatorSet

	waitingForRoundChange bool
	//waitingForStateChange bool // added for PoDC, because the proposer releases its right to a random coordinator
	// a state-change flag will be needed, I think (yichoi)

	validateFn func([]byte, []byte) (common.Address, error)

	backlogs   map[podc.Validator]*prque.Prque
	backlogsMu *sync.Mutex

	current *roundState

	roundChangeSet   *roundChangeSet
	roundChangeTimer *time.Timer

	pendingRequests   *prque.Prque
	pendingRequestsMu *sync.Mutex

	consensusTimestamp time.Time
	// the meter to record the round change rate
	roundMeter goMetrics.Meter
	// the meter to record the sequence update rate
	sequenceMeter goMetrics.Meter
	// the timer to record consensus duration (from accepting a preprepare to final committed stage)
	consensusTimer goMetrics.Timer

	tag          common.Tag
	count        uint
	racingFlag   bool
	racingMu     *sync.Mutex
	startTime    time.Time
	intervalTime time.Time
	criteria     float64 // Criteria int `json:"criteria"`: received as int in type.go, but handled as float64 here
	//agreeCriteria   int
	ExtraDataLength int
}

// finalizeMessage builds the final message to be sent out.
func (c *core) finalizeMessage(msg *message) ([]byte, error) {
	var err error
	// Add sender address
	msg.Address = c.Address() // load the sender's enode address into the message

	// Add proof of consensus
	msg.CommittedSeal = []byte{} // initialize the CommittedSeal field
	// Assign the CommittedSeal if it's a commit message and proposal is not nil
	if msg.Code == msgCommit && c.current.Proposal() != nil {
		seal := PrepareCommittedSeal(c.current.Proposal().Hash()) // fill the CommittedSeal field of the message struct
		msg.CommittedSeal, err = c.backend.Sign(seal)
		if err != nil {
			return nil, err
		}
	}

	// Sign message
	data, err := msg.PayloadNoSig()
	if err != nil {
		return nil, err
	}
	msg.Signature, err = c.backend.Sign(data)
	if err != nil {
		return nil, err
	}

	// Convert to payload
	payload, err := msg.Payload()
	if err != nil {
		return nil, err
	}

	return payload, nil
}

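// send finalizes msg (adding the sender address, committed seal, and
// signature) and delivers the resulting payload to a single target address
// via the backend.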
func (c *core) send(msg *message, target common.Address) {
	logger := c.logger.New("state", c.state)

	payload, err := c.finalizeMessage(msg)
	if err != nil {
		logger.Error("Failed to finalize message", "msg", msg, "err", err)
		return
	}

	// Send payload
	if err = c.backend.Send(payload, target); err != nil {
		logger.Error("Failed to send message", "msg", msg, "err", err)
		return
	}
}

// broadcast finalizes msg (the message struct carries the sender's enode
// address) and broadcasts the payload to the whole validator set.
func (c *core) broadcast(msg *message) {
	logger := c.logger.New("state", c.state)

	payload, err := c.finalizeMessage(msg) // build the final payload to be carried in the message struct
	if err != nil {
		logger.Error("Failed to finalize message", "msg", msg, "err", err)
		return
	}

	// Broadcast payload
	if err = c.backend.Broadcast(c.valSet, payload); err != nil {
		logger.Error("Failed to broadcast message", "msg", msg, "err", err)
		return
	}
}

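// multicast finalizes msg and sends the resulting payload to the given list
// of target addresses via the backend.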
func (c *core) multicast(msg *message, targets []common.Address) {
	logger := c.logger.New("state", c.state)

	payload, err := c.finalizeMessage(msg)
	if err != nil {
		logger.Error("Failed to finalize message", "msg", msg, "err", err)
		return
	}

	if err = c.backend.Multicast(payload, targets); err != nil {
		logger.Error("Failed to multicast message", "msg", msg, "err", err)
		return
	}
}

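// currentView returns a copy of the current sequence and round as a podc.View.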
func (c *core) currentView() *podc.View {
	return &podc.View{
		Sequence: new(big.Int).Set(c.current.Sequence()),
		Round:    new(big.Int).Set(c.current.Round()),
	}
}

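// isRequestQman reports whether this node is the one that should issue the
// request to the Qmanager; at the moment it performs the same proposer check
// as isProposer.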
func (c *core) isRequestQman() bool {
	v := c.valSet
	if v == nil {
		return false
	}
	return v.IsProposer(c.backend.Address()) // checks whether this node is the proposer; TODO: the function body needs to be adjusted here
	// i.e. is this the front node?
}

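// isProposer reports whether this node is the current proposer in the
// validator set.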
func (c *core) isProposer() bool {
	v := c.valSet
	if v == nil {
		return false
	}
	return v.IsProposer(c.backend.Address()) // checks whether this node is the proposer
}

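// commit moves the core into StateCommitted, aggregates the committed seals
// collected from commit messages, and hands the proposal to the backend for
// insertion; if the backend rejects it, a round change is triggered.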
func (c *core) commit() {
	c.setState(StateCommitted)

	proposal := c.current.Proposal()
	if proposal != nil {
		var signatures []byte
		for _, v := range c.current.Commits.Values() {
			signatures = append(signatures, v.CommittedSeal...)
		}

		if err := c.backend.Commit(proposal, signatures); err != nil {
			c.sendNextRoundChange()
			return
		}
	}
}

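// startNewRound resets the core for a new view: it refreshes the validator
// set from the backend, creates a fresh round state and round-change set,
// recalculates the proposer, switches to StateRequestQman, and restarts the
// round-change timer. If roundChange is set and this node is the proposer,
// the backend is asked to force the next round.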
func (c *core) startNewRound(newView *podc.View, roundChange bool) {
	var logger log.Logger
	if c.current == nil {
		logger = c.logger.New("old_round", -1, "old_seq", 0, "old_proposer", c.valSet.GetProposer())
	} else {
		logger = c.logger.New("old_round", c.current.Round(), "old_seq", c.current.Sequence(), "old_proposer", c.valSet.GetProposer()) //1.
	}

	c.ExtraDataLength = 0 //TODO-REAP: workaround for disappeared racing msg

	c.valSet = c.backend.Validators(c.lastProposal)
	// Clear invalid round change messages
	c.roundChangeSet = newRoundChangeSet(c.valSet)
	// New snapshot for new round
	c.current = newRoundState(newView, c.valSet)
	// Calculate new proposer
	c.valSet.CalcProposer(c.lastProposer, newView.Round.Uint64())
	c.waitingForRoundChange = false
	c.setState(StateRequestQman)
	if roundChange && c.isProposer() {
		log.Debug("force next round")
		c.backend.NextRound()
	}
	c.newRoundChangeTimer()
	//logger.Debug("New round", "new_round", newView.Round, "new_seq", newView.Sequence, "new_proposer", c.valSet.GetProposer(), "valSet", c.valSet.List(), "size", c.valSet.Size())
	logger.Debug("New round", "new_round", newView.Round, "new_seq", newView.Sequence, "new_proposer", c.valSet.GetProposer(), "size", c.valSet.Size())
}

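// catchUpRound fast-forwards this node to the given view when it has fallen
// behind: it marks the round meter, flags that we are waiting for a round
// change, rebuilds the round state, and restarts the round-change timer.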
func (c *core) catchUpRound(view *podc.View) {
	logger := c.logger.New("old_round", c.current.Round(), "old_seq", c.current.Sequence(), "old_proposer", c.valSet.GetProposer())

	if view.Round.Cmp(c.current.Round()) > 0 {
		c.roundMeter.Mark(new(big.Int).Sub(view.Round, c.current.Round()).Int64())
	}
	c.waitingForRoundChange = true
	c.current = newRoundState(view, c.valSet)
	c.roundChangeSet.Clear(view.Round)
	c.newRoundChangeTimer()

	logger.Trace("Catch up round", "new_round", view.Round, "new_seq", view.Sequence, "new_proposer", c.valSet)
}

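// setState updates the consensus state, processes pending requests when
// entering StateAcceptRequest or StateRequestQman, and then processes the
// message backlog.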
func (c *core) setState(state State) {
	if c.state != state { // if the state differs, switch to the state passed in
		c.state = state
	}
	if state == StateAcceptRequest || state == StateRequestQman {
		c.processPendingRequests() // requests are not handled right away; they are deferred and handed to the send-event handler
	}
	c.processBacklog()
}

func (c *core) Address() common.Address {
	return c.address
}

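// newRoundChangeTimer stops any existing round-change timer and arms a new
// one; when it fires, the node either catches up to the highest round seen in
// F+1 round change messages or asks for the next round.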
func (c *core) newRoundChangeTimer() {
	if c.roundChangeTimer != nil {
		c.roundChangeTimer.Stop()
	}

	// set timeout based on the round number
	timeout := time.Duration(c.config.RequestTimeout)*time.Millisecond + time.Duration(c.current.Round().Uint64()*c.config.BlockPeriod)*time.Second
	// the timeout value is computed by the formula above
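	// For illustration (example values, not the configured defaults): with
	// RequestTimeout = 10000 (ms) and BlockPeriod = 1 (s), round 0 times out
	// after 10s and round 2 after 10s + 2*1s = 12s.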
	c.roundChangeTimer = time.AfterFunc(timeout, func() {
		log.Debug("timeout round change")
		// If we're not waiting for round change yet, we can try to catch up
		// the max round with F+1 round change message. We only need to catch up
		// if the max round is larger than current round.
		if !c.waitingForRoundChange { // bool flag
			maxRound := c.roundChangeSet.MaxRound(c.valSet.F() + 1)
			if maxRound != nil && maxRound.Cmp(c.current.Round()) > 0 {
				c.sendRoundChange(maxRound)
			} else {
				c.sendNextRoundChange()
			}
		} else {
			c.sendNextRoundChange()
		}
	})
}

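// checkValidatorSignature recovers the signer of data from sig and verifies
// the signer against the current validator set (see
// podc.CheckValidatorSignature).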
func (c *core) checkValidatorSignature(data []byte, sig []byte) (common.Address, error) {
	return podc.CheckValidatorSignature(c.valSet, data, sig)
}

// PrepareCommittedSeal returns a committed seal for the given hash
func PrepareCommittedSeal(hash common.Hash) []byte {
	var buf bytes.Buffer
	buf.Write(hash.Bytes())
	buf.Write([]byte{byte(msgCommit)})
	return buf.Bytes()
}

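// Tag returns the tag currently assigned to this node.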
func (c *core) Tag() common.Tag {
	return c.tag
}

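// SetTag updates the tag assigned to this node.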
func (c *core) SetTag(t common.Tag) {
	c.tag = t
}

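// GetValidatorListExceptQman returns the addresses of the current validator
// set. The Qmanager filter is currently commented out, so despite the name
// every validator address is returned.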
func (c *core) GetValidatorListExceptQman() []common.Address {
	var addrList []common.Address
	// log.Debug("GetValidatorListExceptQman", "c.valSet.List()", len(c.valSet.List()))

	for _, val := range c.valSet.List() {
		// log.Debug("GetValidatorListExceptQman", "addr", val.Address().Hex())
		//if val.Address() != c.qmanager {
		addrList = append(addrList, val.Address())
		//}
	}

	return addrList
}