github.com/sixexorg/magnetic-ring@v0.0.0-20191119090307-31705a21e419/consense/dpoa/dpoamgr.go (about)

package dpoa

import (
	"encoding/json"
	"errors"
	"fmt"
	"reflect"
	"sync"
	"time"

	"github.com/ontio/ontology-eventbus/actor"
	"github.com/sixexorg/magnetic-ring/account"
	"github.com/sixexorg/magnetic-ring/bactor"
	"github.com/sixexorg/magnetic-ring/consense/dpoa/comm"
	"github.com/sixexorg/magnetic-ring/core/mainchain/types"
	"github.com/sixexorg/magnetic-ring/log"
	txpool "github.com/sixexorg/magnetic-ring/txpool/mainchain"
)

// PaxState is the consensus round state of this node.
type PaxState uint8

const (
	PaxStateMin PaxState = iota
	PaxStateInit // round reset, partitions not yet assigned
	PaxStateWaiting
	PaxStateProcessing // running the paxos round as a processer
	PaxStateObserving  // watching the round as an observer
	PaxStateDone
	PaxStateTimeout // round ended by timeout or stop signal
	PaxStateMax
)

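// Observed lifecycle in this file: updatePartions resets a round to
// PaxStateInit, startwork moves it to PaxStateProcessing or
// PaxStateObserving depending on the node's role, and the round ends in
// PaxStateTimeout once ConsensProcess returns or stopCh fires.
// PaxStateWaiting and PaxStateDone are never assigned here.
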
// PaxRole is the part a node plays in the current view.
type PaxRole uint8

const (
	PaxRoleMin PaxRole = iota
	PaxRoleProcesser // drives consensus for this view
	PaxRoleObserver  // watches this view without voting
	PaxRoleFail      // listed in the view's fail set
	PaxRoleMax       // no role resolved yet
)

// DpoaMgr drives DPoA consensus rounds: per view it partitions the current
// star set into processers and observers, dispatches incoming paxos messages
// to registered handlers, and assembles block proposals for the processer
// role. The embedded RWMutex guards reads in GetpaxState; note that the
// writers in this file update paxState and partiCfg without taking it.
type DpoaMgr struct {
	sync.RWMutex
	txpool   *txpool.TxPool
	partiCfg *comm.ParticiConfig // participant sets for the current view
	paxState PaxState
	role     PaxRole
	recvCh   chan interface{}         // inbound messages, dispatched by recv
	sendCh   chan interface{}         // outbound messages produced by the paxos instance
	handles  map[string]reflect.Value // message type name -> handler func
	paxosIns *Paxos
	cfg      *Config
	store    *BlockStore
	msgpool  *MsgPool
	annPid   *actor.PID // radar actor, told the processer set each view
	p2pPid   *actor.PID // p2p actor, told ConsenseNotify at round start
}

// NewdpoaMgr wires a manager to the global txpool, a fresh Paxos instance,
// and the radar/p2p actors, and registers the four paxos message handlers.
func NewdpoaMgr(cfg *Config, store *BlockStore, msgpool *MsgPool, p2pActor *actor.PID) *DpoaMgr {
	txpool, _ := txpool.GetPool()
	p := &DpoaMgr{cfg: cfg, handles: make(map[string]reflect.Value), paxState: PaxStateInit, partiCfg: &comm.ParticiConfig{},
		sendCh: make(chan interface{}, 100), recvCh: make(chan interface{}, 100), store: store, msgpool: msgpool, txpool: txpool}
	p.paxosIns = NewPaxos(cfg.accountStr, cfg.account, p.sendCh)
	p.register(comm.P1a{}, p.paxosIns.HandleP1a)
	p.register(comm.P1b{}, p.paxosIns.HandleP1b)
	p.register(comm.P2a{}, p.paxosIns.HandleP2a)
	p.register(comm.P2b{}, p.paxosIns.HandleP2b)
	// Lookup errors are silently dropped here; annPid stays nil if the radar
	// actor is not registered yet, which would likely break Tell later.
	p.annPid, _ = bactor.GetActorPid(bactor.MAINRADARACTOR)
	p.p2pPid = p2pActor
	return p
}

// register binds a handler to the concrete message type m. The handler must
// take exactly one argument of m's type.
func (n *DpoaMgr) register(m interface{}, f interface{}) {
	t := reflect.TypeOf(m)
	fn := reflect.ValueOf(f)
	if fn.Kind() != reflect.Func || fn.Type().NumIn() != 1 || fn.Type().In(0) != t {
		panic(fmt.Sprintf("register: handler for %v must be func(%v)", t, t))
	}
	n.handles[t.String()] = fn
}

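// The dispatch contract enforced above: a handler registered for a value of
// type T must have the exact signature func(T). A hypothetical sketch
// (comm.P3a and HandleP3a are illustrative, not part of this package):
//
//	p.register(comm.P3a{}, p.paxosIns.HandleP3a) // ok if it is func(comm.P3a)
//	p.register(comm.P3a{}, func(m *comm.P3a) {}) // panics: *comm.P3a != comm.P3a
//
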
// Run starts the dispatch loop.
func (n *DpoaMgr) Run() {
	log.Info("node start running", "accountStr", n.cfg.accountStr)
	go n.recv()
}

// recv receives messages from the inbound channel and dispatches each one to
// the handler registered for its concrete type. Messages arriving outside
// the PaxStateProcessing state are dropped.
func (n *DpoaMgr) recv() {
	for m := range n.recvCh {
		if n.paxState != PaxStateProcessing {
			continue
		}
		v := reflect.ValueOf(m)
		name := v.Type().String()
		f, exists := n.handles[name]
		if !exists {
			log.Error("no registered handle function for message type", "type", name)
			continue
		}
		f.Call([]reflect.Value{v})
	}
}

// SendCh returns the channel callers write into; DpoaMgr consumes it in recv.
func (n *DpoaMgr) SendCh() chan<- interface{} {
	return n.recvCh
}

// SendData pushes one message into the manager's inbound channel.
func (n *DpoaMgr) SendData(data interface{}) {
	n.recvCh <- data
}

// RecvCh returns the channel callers read from; paxos output arrives here.
func (n *DpoaMgr) RecvCh() <-chan interface{} {
	return n.sendCh
}

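// Channel directions, from the caller's side: SendCh/SendData feed messages
// into the manager (its recvCh), while RecvCh yields what the paxos instance
// emits (its sendCh). A minimal wiring sketch, assuming a hypothetical
// transport with Inbound/Broadcast:
//
//	go func() {
//		for msg := range transport.Inbound() {
//			mgr.SendData(msg) // hand peer messages to the dispatcher
//		}
//	}()
//	go func() {
//		for out := range mgr.RecvCh() {
//			transport.Broadcast(out) // ship paxos output to peers
//		}
//	}()
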
// PaxosSt is a snapshot of the manager's consensus state. Note that patiCfg
// is a shared pointer: later partition updates are visible through it.
type PaxosSt struct {
	state   PaxState
	patiCfg *comm.ParticiConfig
}

// GetpaxState returns the current state and participant config under the read lock.
func (n *DpoaMgr) GetpaxState() *PaxosSt {
	n.RLock()
	defer n.RUnlock()

	return &PaxosSt{state: n.paxState, patiCfg: n.partiCfg}
}

// updatePartions recomputes the participant partition for the given block
// number and view (timeoutCount), re-sorts the stars by a VRF seed at each
// epoch boundary, and slices out this view's processer/observer sets.
func (n *DpoaMgr) updatePartions(timeoutCount int, blkNum uint64) error {
	n.paxState = PaxStateInit
	stars := n.store.GetCurStars()
	ebgHeight := n.store.EpochBegin()
	lastBlk, _ := n.store.getSealedBlock(blkNum - 1)
	var times int
	if blkNum-1 == ebgHeight {
		times = timeoutCount
	} else {
		if lastBlk == nil {
			return fmt.Errorf("DpoaMgr updatePartions: sealed block %d not found", blkNum-1)
		}
		times = int(lastBlk.GetViews()) + timeoutCount + 1
	}
	partiNums, epochViews := CalcStellar(float64(len(stars)))
	if times > epochViews-1 {
		return errors.New("genblock complete, wait for earth block")
	}

	if blkNum-1 == ebgHeight || n.partiCfg.BlkNum < ebgHeight {
		n.partiCfg = &comm.ParticiConfig{
			PartiRaw:   stars,
			ProcNodes:  make([]string, 0),
			ObserNodes: make([]string, 0),
			FailsNodes: make([]string, 0),
		}
		var blkData *comm.Block
		if ebgHeight == 0 { // genesis epoch
			blkData, _ = n.store.getGeisisBlock()
		} else {
			blkData, _ = n.store.getSealedBlock(ebgHeight)
		}
		vrfValue := getParticipantSelectionSeed(blkData)
		if vrfValue.IsNil() {
			return errors.New("DpoaMgr updatePartions: participant selection seed (vrf) is nil")
		}
		n.partiCfg.StarsSorted = calcParticipantPeers(vrfValue, stars)
	}

	log.Info("DpoaMgr updatePartions",
		"timeoutCount", timeoutCount, "blkNum", blkNum, "partiNums", partiNums, "epochViews", epochViews,
		"times", times, "ebgHeight", ebgHeight, "sortedStars", len(n.partiCfg.StarsSorted))
	n.partiCfg.BlkNum = blkNum
	n.partiCfg.View = uint32(times)
	n.partiCfg.ProcNodes = n.partiCfg.StarsSorted[times*partiNums : (times+1)*partiNums]
	n.partiCfg.ObserNodes = n.partiCfg.StarsSorted[(times+1)*partiNums:]
	log.Info("DpoaMgr updatePartions partition", "procNodes", n.partiCfg.ProcNodes, "obserNodes", n.partiCfg.ObserNodes)
	n.annPid.Tell(n.partiCfg.ProcNodes)

	return n.updateRole()
}

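// A worked example of the slicing above, with hypothetical numbers: if
// CalcStellar gave partiNums = 3 and epochViews = 4 for 12 sorted stars,
// view 0 takes StarsSorted[0:3] as processers and StarsSorted[3:] as
// observers, view 1 takes StarsSorted[3:6] and StarsSorted[6:], and so on;
// once times exceeds epochViews-1 the epoch is spent and the node waits for
// the next earth block.
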
// genBlockData assembles the next block proposal: it packs any established
// timeout signatures for the upcoming height, computes the new block's VRF
// from the previous block's VRF value, executes the pending txpool, and
// stamps the result with the vbft consensus payload.
func (n *DpoaMgr) genBlockData() *comm.Block {
	timeoutSigs := make([][]byte, 0)
	for view, data := range n.msgpool.GetTimeoutMsgs(n.store.db.GetCurrentBlockHeight() + 1) {
		if data.established {
			for publicKey, sig := range data.tmSigs {
				var d []byte
				d = append(d, int2Byte(uint16(view))...)
				idx, _ := GetIndex(n.store.GetCurStars(), publicKey)
				log.Info("genBlockData timeout sig", "publicKey", publicKey, "idx", idx,
					"view", view, "blkNum", n.store.db.GetCurrentBlockHeight()+1)
				d = append(d, int2Byte(uint16(idx))...)
				d = append(d, sig...)
				timeoutSigs = append(timeoutSigs, d)
			}
		}
	}

	prevBlk, _ := n.store.getSealedBlock(n.store.db.GetCurrentBlockHeight())
	if prevBlk == nil {
		log.Error("genBlockData failed to get prev block", "blkNum", n.store.db.GetCurrentBlockHeight())
		return nil
	}
	blocktimestamp := uint64(time.Now().Unix())
	if prevBlk.Block.Header.Timestamp >= blocktimestamp {
		blocktimestamp = prevBlk.Block.Header.Timestamp + 1
	}
	vrfValue, vrfProof, err := computeVrf(n.cfg.account.(*account.NormalAccountImpl).PrivKey, n.store.db.GetCurrentBlockHeight()+1, prevBlk.GetVrfValue())
	if err != nil {
		log.Error("computeVrf failed to get vrf and proof", "err", err)
		return nil
	}

	lastConfigBlkNum := prevBlk.Info.LastConfigBlockNum
	if prevBlk.Info.NewChainConfig != nil {
		lastConfigBlkNum = prevBlk.GetBlockNum()
	}

	vbftBlkInfo := &comm.VbftBlockInfo{
		View:               n.partiCfg.View,
		Miner:              n.cfg.accountStr,
		VrfValue:           vrfValue,
		VrfProof:           vrfProof,
		LastConfigBlockNum: lastConfigBlkNum,
	}
	consensusPayload, _ := json.Marshal(vbftBlkInfo)
	log.Info("func dpoa genBlockData begin")

	blkinfo := n.txpool.Execute()
	log.Info("func dpoa genBlockData", "blockHeight", blkinfo.Block.Header.Height,
		"txlen", blkinfo.Block.Transactions.Len(), "hash", blkinfo.Block.Header.Hash().String())

	blkinfo.Block.Header.Timestamp = blocktimestamp
	blkinfo.Block.Header.ConsensusPayload = consensusPayload
	blkinfo.Block.Sigs = &types.SigData{TimeoutSigs: timeoutSigs, FailerSigs: make([][]byte, 0), ProcSigs: make([][]byte, 0)}
	block, err := comm.InitVbftBlock(blkinfo.Block)
	if err != nil {
		log.Error("genBlockData InitVbftBlock failed", "err", err)
		return nil
	}

	return block
}

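// Each TimeoutSigs entry assembled above is a packed byte string, assuming
// int2Byte encodes a uint16 as two bytes:
//
//	[view uint16 (2 bytes)] [star index uint16 (2 bytes)] [signature]
//
// which lets a verifier recover (view, signer index) and check the signature
// against the star list.
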
// startwork kicks off one consensus view: it recomputes partitions, tells
// the p2p layer who the processers are, then runs the node's role until the
// view finishes or stopCh is closed. Note that the goroutine mutates
// paxState without holding the embedded mutex, so GetpaxState readers may
// observe transitions late.
func (n *DpoaMgr) startwork(timeoutCount int, blkNum uint64, stopCh chan struct{}) {
	if err := n.updatePartions(timeoutCount, blkNum); err != nil {
		log.Error("DpoaMgr startwork updatePartions", "err", err)
		return
	}
	notice := &comm.ConsenseNotify{BlkNum: n.partiCfg.BlkNum, ProcNodes: n.partiCfg.ProcNodes, Istart: true}
	n.p2pPid.Tell(notice)

	go func() {
		switch n.role {
		case PaxRoleProcesser:
			n.paxosIns.partiCfg = n.partiCfg
			n.paxState = PaxStateProcessing
			log.Info("func dpoa dpoamgr startwork")
			n.paxosIns.ConsensProcess(n.genBlockData(), 100, n.partiCfg, stopCh)
			n.paxState = PaxStateTimeout
		case PaxRoleObserver:
			n.paxState = PaxStateObserving
			<-stopCh
			n.paxState = PaxStateTimeout
		case PaxRoleFail:
		default:
			log.Info("DpoaMgr handle default", "acc", n.cfg.accountStr, "blknum", n.partiCfg.BlkNum, "view", n.partiCfg.View)
		}
	}()
}

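// One full view, as driven by startwork: partitions and role are recomputed,
// peers get a ConsenseNotify, then a processer runs the paxos round over a
// freshly assembled block while an observer just waits on stopCh; either way
// the view ends in PaxStateTimeout.
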
// updateRole resolves this node's role for the current view by looking up
// its account string in the observer, processer, and fail lists, in that
// order.
func (n *DpoaMgr) updateRole() (err error) {
	defer func() {
		if err == nil {
			log.Info("DpoaMgr updateRole", "accountStr", n.cfg.accountStr, "BlkNum", n.partiCfg.BlkNum, "View", n.partiCfg.View, "role", n.role)
		}
	}()

	publicStr := n.cfg.accountStr
	n.role = PaxRoleMax

	for _, v := range n.partiCfg.ObserNodes {
		if v == publicStr {
			n.role = PaxRoleObserver
			return
		}
	}

	for _, v := range n.partiCfg.ProcNodes {
		if v == publicStr {
			n.role = PaxRoleProcesser
			return
		}
	}

	for _, v := range n.partiCfg.FailsNodes {
		if v == publicStr {
			n.role = PaxRoleFail
			return
		}
	}

	err = fmt.Errorf("DpoaMgr updateRole unknown role, obser:%v proc:%v fail:%v",
		n.partiCfg.ObserNodes, n.partiCfg.ProcNodes, n.partiCfg.FailsNodes)
	return
}
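
// A sketch of how a caller might drive successive views; the retry policy is
// illustrative, not taken from this package:
//
//	stopCh := make(chan struct{})
//	mgr.startwork(0, blkNum, stopCh) // view 0
//	// ...presumably, on view timeout:
//	close(stopCh)
//	stopCh = make(chan struct{})
//	mgr.startwork(1, blkNum, stopCh) // retry as view 1 (timeoutCount = 1)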