github.com/cranelv/ethereum_mpc@v0.0.0-20191031014521-23aeb1415092/mpcService/mpc_distributor.go

     1  package mpcService
     2  
     3  import (
     4  	"crypto/ecdsa"
     5  	"errors"
     6  	"math/big"
     7  	"sync"
     8  	"github.com/ethereum/go-ethereum/mpcService/protocol"
     9  	"github.com/ethereum/go-ethereum/common"
    10  	"github.com/ethereum/go-ethereum/p2p/discover"
    11  	"github.com/ethereum/go-ethereum/p2p"
    12  	"github.com/ethereum/go-ethereum/rlp"
    13  	"github.com/ethereum/go-ethereum/accounts/keystore"
    14  	"github.com/ethereum/go-ethereum/common/hexutil"
    15  	"github.com/ethereum/go-ethereum/crypto/secp256k1"
    16  	"time"
    17  	"sort"
    18  	"github.com/ethereum/go-ethereum/log"
    19  	"github.com/ethereum/go-ethereum/core/types"
    20  	"math/rand"
    21  	"github.com/ethereum/go-ethereum/mpcService/crypto"
    22  )
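        // MpcInterval is the scheduling period, in seconds, used both for the
        // distributor's housekeeping ticker and for rotating the peer leader.
        // MpcCacity bounds how many timer buckets of pending node info are kept
        // before the oldest entries are evicted and reported as timed out.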
    23  const (
    24  	MpcInterval = 30
    25  	MpcCacity =   20
    26  )
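        // MpcContextCreater builds a concrete MPC context (signing or account
        // creation) for a given context type, context ID, peer set and leader.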
    27  type MpcContextCreater interface {
    28  	CreateContext(uint, common.Hash, []protocol.PeerInfo,*discover.NodeID, ...protocol.MpcValue) (MpcInterface, error) //createContext
    29  }
    30  
    31  type MpcInterface interface {
    32  	getMessage(*discover.NodeID, *protocol.MpcMessage) error
    33  	mainMPCProcess(manager protocol.MpcManager) error
    34  	SubscribeResult(chan<- *protocol.MpcResult)
    35  	quit(error)
    36  }
    37  
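        // P2pMessager abstracts the p2p layer the distributor relies on: sending
        // to a single peer, receiving inbound messages, and enumerating peers.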
    38  type P2pMessager interface {
    39  	SendToPeer(*discover.NodeID, uint64, interface{}) error
    40  	MessageChannel() <-chan protocol.PeerMessage
    41  	GetPeers()[]*discover.NodeID
    42  	Self() *discover.Node
    43  	IsActivePeer(*discover.NodeID) bool
    44  }
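        // MpcKey pairs a transaction hash with an MPC address; its RLP hash
        // (NodeInfoKey) is the map key for pending node-info collections.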
    45  type MpcKey struct {
    46  	Hash common.Hash
    47  	Address common.MpcAddress
    48  }
    49  func NodeInfoKey (hash common.Hash,addr common.MpcAddress)common.Hash{
    50  	return types.RlpHash(&MpcKey{hash,addr})
    51  }
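        // mpcNodeInfo tracks, per NodeInfoKey, which peers have announced
        // themselves for a pending request, and keeps a ring of timer buckets
        // used to expire requests that never gather a quorum.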
    52  type mpcNodeInfo struct {
    53  	mu             sync.RWMutex
    54  	mpcHashTimer   [][]*common.Hash
    55  	mpcNodeInfo    map[common.Hash]protocol.MpcNodeInterface
    56  }
    57  func NewMpcNodeInfo()*mpcNodeInfo{
    58  	return &mpcNodeInfo{
    59  		mpcHashTimer: make([][]*common.Hash,1),
    60  		mpcNodeInfo: make(map[common.Hash]protocol.MpcNodeInterface),
    61  	}
    62  }
    63  func (mn* mpcNodeInfo) getEnoughNodeInfo()[]protocol.MpcNodeInterface{
    64  	var NodeAry []protocol.MpcNodeInterface
    65  	mn.mu.RLock()
    66  	defer mn.mu.RUnlock()
    67  	for _,nodeInfo := range mn.mpcNodeInfo{
    68  		if nodeInfo.FetchQuorum() {
    69  			NodeAry = append(NodeAry,nodeInfo)
    70  		}
    71  	}
    72  	return NodeAry
    73  }
    74  func (mn* mpcNodeInfo) getNodeInfo(hash common.Hash,addr common.MpcAddress)protocol.MpcNodeInterface{
    75  	nodeHash := NodeInfoKey(hash,addr)
    76  	mn.mu.RLock()
    77  	defer mn.mu.RUnlock()
    78  	return mn.mpcNodeInfo[nodeHash]
    79  }
    80  func (mn* mpcNodeInfo) RunContextNodeID(hash common.Hash,addr common.MpcAddress,seed uint64,nodeId *discover.NodeID)protocol.MpcState{
    81  	nodeHash := NodeInfoKey(hash,addr)
    82  	mn.mu.RLock()
    83  	defer mn.mu.RUnlock()
    84  	item,exist := mn.mpcNodeInfo[nodeHash]
    85  	if exist {
    86  		return item.RunNode(seed,nodeId)
    87  	}
    88  	return protocol.MpcNotFound
    89  }
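        // ChangeMpcState returns the node's current state and, if it is below
        // MpcRunning, promotes it to MpcRunning; the state argument itself is
        // not used by the current implementation.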
    90  func (mn* mpcNodeInfo) ChangeMpcState(hash common.Hash,addr common.MpcAddress,state protocol.MpcState)protocol.MpcState{
    91  	nodeHash := NodeInfoKey(hash,addr)
    92  	mn.mu.Lock()
    93  	defer mn.mu.Unlock()
    94  	item,exist := mn.mpcNodeInfo[nodeHash]
    95  	if exist {
    96  		state = item.GetState()
    97  		if state < protocol.MpcRunning{
    98  			item.SetState(protocol.MpcRunning)
    99  		}
   100  		return state
   101  	}
   102  	return protocol.MpcNotFound
   103  }
   104  func (mn* mpcNodeInfo) setMpcState(hash common.Hash,addr common.MpcAddress,state protocol.MpcState){
   105  	nodeHash := NodeInfoKey(hash,addr)
   106  	mn.mu.RLock()
   107  	defer mn.mu.RUnlock()
   108  	item,exist := mn.mpcNodeInfo[nodeHash]
   109  	if exist {
   110  		item.SetState(state)
   111  	}
   112  }
   113  func (mn* mpcNodeInfo) addNodeInfo(hash common.Hash,seed uint64,nodeId *discover.NodeID,
   114  	peers []*discover.NodeID,leader *discover.NodeID,key *keystore.MpcKey)error{
   115  	nodeHash := NodeInfoKey(hash,key.Address)
   116  	mn.mu.Lock()
   117  	defer mn.mu.Unlock()
   118  	item,exist := mn.mpcNodeInfo[nodeHash]
   119  	if exist {
   120  		return item.AddNode(seed,nodeId)
   121  	}else {
   122  		index := len(mn.mpcHashTimer)-1
   123  		mn.mpcHashTimer[index] = append(mn.mpcHashTimer[index],&nodeHash)
   124  		item = NewNodeCollection(hash,peers,leader,key)
   125  		mn.mpcNodeInfo[nodeHash] = item
   126  		return item.AddNode(seed,nodeId)
   127  	}
   128  }
   129  func (mn* mpcNodeInfo) getPeerInfo(hash common.Hash,addr common.MpcAddress)[]protocol.PeerInfo{
   130  	nodeHash := NodeInfoKey(hash,addr)
   131  	mn.mu.RLock()
   132  	defer mn.mu.RUnlock()
   133  	if nodeInfo,exist := mn.mpcNodeInfo[nodeHash];exist{
   134  		return nodeInfo.GetPeers()
   135  	}
   136  	return nil
   137  }
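        // newTimer opens a fresh timer bucket and, once more than MpcCacity
        // buckets exist, evicts the oldest ones, removing their node info and
        // returning the evicted hashes so callers can broadcast a timeout.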
    138  func (mn* mpcNodeInfo) newTimer(sec uint64)[]*common.Hash{
   139  	mn.mu.Lock()
   140  	defer mn.mu.Unlock()
   141  	var hashAry []*common.Hash
   142  	count := len(mn.mpcHashTimer)
   143  	if count > MpcCacity {
   144  		for i:=0;i<count-MpcCacity ;i++  {
   145  			hashAry = append(hashAry,mn.mpcHashTimer[i]...)
   146  		}
   147  		mn.mpcHashTimer = mn.mpcHashTimer[count-MpcCacity:]
   148  	}
   149  	mn.mpcHashTimer = append(mn.mpcHashTimer,[]*common.Hash{})
   150  	/*
   151  	count = len(mn.mpcHashTimer)
   152  	if count > 2 {
   153  		reSendhash := mn.mpcHashTimer[count-3]
   154  		for i:=0;i<len(reSendhash);i++{
   155  			if nodeInfo,exist := mn.mpcNodeInfo[*reSendhash[i]];exist{
   156  				if nodeInfo.GetState() == protocol.MpcRunning{
   157  					nodeInfo.SetState(protocol.MpcWaiting)
   158  				}
   159  			}
   160  		}
   161  	}
   162  	if count > 3 {
   163  		reSendhash := mn.mpcHashTimer[count-3]
   164  		for i:=0;i<len(reSendhash);i++{
   165  			if nodeInfo,exist := mn.mpcNodeInfo[*reSendhash[i]];exist{
   166  				state := nodeInfo.GetState()
   167  				if state == protocol.MpcRunning || state  == protocol.MpcWaiting {
   168  					nodeInfo.SetState(protocol.MpcCollection)
   169  				}
   170  			}
   171  		}
   172  	}
   173  	*/
   174  	for _,hash := range hashAry {
   175  		delete (mn.mpcNodeInfo,*hash)
   176  	}
   177  	return hashAry
   178  }
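        // mpcCtxInfo is a mutex-guarded map of live MPC contexts keyed by context ID.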
   179  type mpcCtxInfo struct {
   180  	mu             sync.RWMutex
   181  	mpcCtxInfo    map[common.Hash]MpcInterface
   182  }
   183  func (mn* mpcCtxInfo) getMpcCtx(hash common.Hash)MpcInterface{
   184  	mn.mu.RLock()
   185  	defer mn.mu.RUnlock()
   186  	return mn.mpcCtxInfo[hash]
   187  }
   188  func (mn* mpcCtxInfo) hasMpcCtx(hash common.Hash)bool{
   189  	mn.mu.RLock()
   190  	defer mn.mu.RUnlock()
   191  	_,exist := mn.mpcCtxInfo[hash]
   192  	return exist
   193  }
   194  func (mn* mpcCtxInfo) setMpcCtx(hash common.Hash,mpcCtx MpcInterface){
   195  	mn.mu.Lock()
   196  	defer mn.mu.Unlock()
   197  	mn.mpcCtxInfo[hash] = mpcCtx
   198  }
   199  func (mn* mpcCtxInfo) removeMpcCtx(hash common.Hash){
   200  	mn.mu.Lock()
   201  	defer mn.mu.Unlock()
   202  	delete(mn.mpcCtxInfo,hash)
   203  }
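        // MpcDistributor wires the pieces together: it owns the context factory,
        // the pending node-info map, the live context map, the MPC keystore and
        // the p2p messager, and runs the message/timer loop.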
   204  type MpcDistributor struct {
   205  	mpcCreater     MpcContextCreater
   206  	nodeInfoMap    *mpcNodeInfo
   207  	mpcMap         mpcCtxInfo
   208  	mpcKeyStore    *keystore.MpcKeyStore
   209  	P2pMessager    P2pMessager
   210  	quit		   chan struct{}
   211  	result		   chan *protocol.MpcResult
   212  	password       string
   213  }
   214  
   215  func CreateMpcDistributor(keystoreDir string, msger P2pMessager, password string) *MpcDistributor {
   216  	mpc := &MpcDistributor{
   217  		mpcCreater:     &MpcCtxFactory{},
   218  		mpcMap:         mpcCtxInfo{mpcCtxInfo:make(map[common.Hash]MpcInterface)},
   219  		nodeInfoMap:    NewMpcNodeInfo(),
   220  		mpcKeyStore: 	keystore.NewMpcKeyStore(keystoreDir),
   221  		quit:			make(chan struct{},1),
   222  		result: 		make(chan *protocol.MpcResult,64),
   223  		password:       password,
   224  		P2pMessager:    msger,
   225  	}
   226  	return mpc
   227  }
   228  func (mpcServer *MpcDistributor) MessageLoop(){
   229  	ticker := time.NewTicker(MpcInterval*time.Second)
   230  	ticker1 := time.NewTicker(3100*time.Millisecond)
    231  	defer ticker.Stop()
         	defer ticker1.Stop()
   232  	for {
   233  		select{
   234  		case msg:= <-mpcServer.P2pMessager.MessageChannel():
   235  //			length := len(mpcServer.P2pMessager.MessageChannel())
   236  //			if length == 0 {
   237  //				log.Error("MessageLoop","MessageLoopSeed",mpcServer.SelfNodeId(),"MessageChannel Length",length)
   238  //			}
   239  			mpcServer.GetMessage(msg.From,msg.Message)
   240  		case result := <- mpcServer.result :
   241  			if result != nil{
    242  				log.Info("Mpc Context Result","hash",result.Hash,"Result",result.Result)
   243  			}
   244  		case <-ticker.C:
   245  			go mpcServer.NewTime()
   246  		case <-ticker1.C:
   247  			go mpcServer.CreateRequestMpc()
   248  		case <-mpcServer.quit:
   249  			mpcServer.quitLoop()
   250  			return
   251  		}
   252  	}
   253  }
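        // NewTime is called from the housekeeping ticker: it rolls the timer
        // buckets and broadcasts an MPCError ("Mpc Time out") for every pending
        // request that was evicted without finishing.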
   254  func (mpcServer *MpcDistributor)NewTime(){
   255  //	log.Error("(mpcServer *MpcDistributor)NewTime()")
   256  	sec := time.Now().Second()/MpcInterval
   257  	hashAry := mpcServer.nodeInfoMap.newTimer(uint64(sec))
   258  	for _,hash := range hashAry{
   259  		mpcMsg := &protocol.MpcMessage{ContextID: *hash,
   260  			StepID: 0,
   261  			Error: "Mpc Time out"}
   262  		go mpcServer.BoardcastMessage(nil, protocol.MSG_MPCError, mpcMsg)
   263  	}
   264  }
   265  func (mpcServer *MpcDistributor)Start(){
   266  	go mpcServer.MessageLoop()
   267  }
   268  func (mpcServer *MpcDistributor)Stop(){
   269  	select {
   270  	case mpcServer.quit <- struct{}{}:
   271  	default:
   272  	}
   273  }
   274  func (mpcServer *MpcDistributor)quitLoop(){
   275  	mpcServer.mpcMap.mu.Lock()
   276  	defer mpcServer.mpcMap.mu.Unlock()
   277  	for _,mpc := range mpcServer.mpcMap.mpcCtxInfo{
    278  		mpc.quit(errors.New("mpc server stopped"))
   279  	}
   280  	mpcServer.mpcMap.mpcCtxInfo = make(map[common.Hash]MpcInterface)
   281  }
   282  
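        // GetAddressAndHash extracts the MpcTxHash and MpcAddress entries from a
        // message's data; have is true only when both were present.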
   283  func GetAddressAndHash(message *protocol.MpcMessage)(hash common.Hash,address common.MpcAddress,have bool){
   284  	n := 0
   285  	for _,item := range message.Data{
   286  		if item.Key == protocol.MpcAddress {
   287  			address = common.BytesToMpcAddress(item.Data.([]byte))
   288  			n++
   289  		}else if item.Key == protocol.MpcTxHash {
   290  			hash = common.BytesToHash(item.Data.([]byte))
   291  			n++
   292  		}
   293  	}
   294  	have = n == 2
   295  	return
   296  }
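        // GetMessage is the p2p entry point: it decodes the payload according to
        // msg.Code and dispatches to the prepare/request/message/finish handlers,
        // mostly on fresh goroutines so the message loop is never blocked.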
   297  func (mpcServer *MpcDistributor) GetMessage(PeerID discover.NodeID, msg *p2p.Msg) error {
   298  	log.Info("MpcDistributor GetMessage begin!", "msgCode", msg.Code)
   299  
   300  	switch msg.Code {
   301  	case protocol.StatusCode:
   302  		// this should not happen, but no need to panic; just ignore this message.
    303  		log.Info("unexpected status message received", "peer", PeerID.String())
   304  
   305  	case protocol.KeepaliveCode:
   306  		// this should not happen, but no need to panic; just ignore this message.
   307  
   308  	case protocol.KeepaliveOkCode:
   309  		// this should not happen, but no need to panic; just ignore this message.
   310  
   311  	case protocol.MSG_MPCError:
   312  		var mpcMessage protocol.MpcMessage
   313  		err := rlp.Decode(msg.Payload, &mpcMessage)
   314  		if err != nil {
    315  			log.Error("MpcDistributor.GetMessage, rlp decode MPCError msg fail", "err", err)
   316  			return err
   317  		}
   318  
   319  //		log.Error("MpcDistributor.GetMessage, MPCError message received.","peer",PeerID,"error", mpcMessage.Error)
   320  		go mpcServer.QuitMpcContext(&mpcMessage)
   321  	case protocol.MSG_RequestPrepare:
   322  		var mpcMessage protocol.MpcMessage
   323  		err := rlp.Decode(msg.Payload, &mpcMessage)
   324  		if err != nil {
    325  			log.Error("MpcDistributor.GetMessage, rlp decode RequestPrepare msg fail", "err", err)
   326  			return err
   327  		}
   328  
   329  		go mpcServer.handleMpcPrepqare(&PeerID,&mpcMessage)
   330  
   331  	case protocol.MSG_RequestMPC:
   332  //		return nil
   333  		log.Info("MpcDistributor.GetMessage, RequestMPC message received.","peer", PeerID)
   334  		var mpcMessage protocol.MpcMessage
   335  		err := rlp.Decode(msg.Payload, &mpcMessage)
   336  		if err != nil {
    337  			log.Error("MpcDistributor.GetMessage, rlp decode RequestMPC msg fail", "err", err)
   338  			return err
   339  		}
   340  		go func(inputMsg protocol.MpcMessage){
   341  			err := mpcServer.createMpcContext(&inputMsg,PeerID)
   342  			if err != nil{
   343  				if err == protocol.ErrMpcContextExist{
   344  
   345  				}else if err == protocol.ErrMpcFinish{
   346  					hash,addr,have := GetAddressAndHash(&inputMsg)
   347  					if have {
   348  						mpcMsg := &protocol.MpcMessage{ContextID: inputMsg.ContextID,
   349  							StepID: 0,
   350  							Data:[]*protocol.MpcData{&protocol.MpcData{protocol.MpcTxHash,hash},
   351  							&protocol.MpcData{protocol.MpcAddress,addr}},
   352  						}
   353  						go mpcServer.BoardcastMessage(nil, protocol.MSG_MPCFinish, mpcMsg)
   354  					}
   355  				}else{
   356  					mpcMsg := &protocol.MpcMessage{ContextID: inputMsg.ContextID,
   357  						StepID: 0,
   358  						Error: err.Error()}
   359  					var peers []protocol.PeerInfo
   360  					if len(inputMsg.Peers)>0 {
   361  						rlp.DecodeBytes(inputMsg.Peers,&peers)
   362  					}
   363  					nodeIds := make([]*discover.NodeID,len(peers))
   364  					for i:=0;i<len(peers);i++{
   365  						nodeIds[i] = peers[i].PeerID
   366  					}
   367  					//mpcServer.nodeInfoMap.setMpcState(inputMsg.ContextID,protocol.MpcCollection)
   368  					go mpcServer.BoardcastMessage(nodeIds, protocol.MSG_MPCError, mpcMsg)
   369  				}
   370  			}else{
   371  				hash,addr,have := GetAddressAndHash(&inputMsg)
   372  				if have {
   373  					mpcServer.nodeInfoMap.setMpcState(hash,addr,protocol.MpcFinish)
   374  				}
   375  			}
   376  		}(mpcMessage)
   377  	case protocol.MSG_MPCMessage:
   378  //		return nil
   379  		var mpcMessage protocol.MpcMessage
   380  		err := rlp.Decode(msg.Payload, &mpcMessage)
   381  		if err != nil {
    382  			log.Error("MpcDistributor.GetMessage, rlp decode MPCMessage msg fail", "err", err)
   383  			return err
   384  		}
   385  
   386  		log.Info("MpcDistributor.GetMessage, MPCMessage message received","peer", PeerID)
   387  		go mpcServer.getMpcMessage(&PeerID, &mpcMessage)
   388  	case protocol.MSG_MPCFinish:
   389  		var mpcMessage protocol.MpcMessage
   390  		err := rlp.Decode(msg.Payload, &mpcMessage)
   391  		if err != nil {
    392  			log.Error("MpcDistributor.GetMessage, rlp decode MPCFinish msg fail", "err", err)
   393  			return err
   394  		}
   395  		go mpcServer.FinishMpc(&PeerID, &mpcMessage)
   396  	default:
   397  		// New message types might be implemented in the future versions of Whisper.
   398  		// For forward compatibility, just ignore.
   399  	}
   400  
   401  	return nil
   402  }
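        // SendSignRequestPrepare registers a signing request for txHash against a
        // locally stored MPC key and broadcasts a MSG_RequestPrepare carrying the
        // hash, the address and this node's seed.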
   403  func (mpcServer *MpcDistributor) SendSignRequestPrepare(txHash common.Hash,address common.MpcAddress) error {
   404  	key := mpcServer.mpcKeyStore.GetMpcKey(address)
   405  	if key == nil {
    406  		return errors.New("mpc keystore entry not found for address")
   407  	}
   408  	peers := mpcServer.P2pMessager.GetPeers()
   409  	err := mpcServer.nodeInfoMap.addNodeInfo(txHash,key.MPCSeed,mpcServer.SelfNodeId(),peers,nil,key)
   410  	if err != nil {
   411  		log.Error("SendSignRequestPrepare","error",err)
   412  		return err
   413  	}
   414  //	log.Error("SendSignRequestPrepare","Self",mpcServer.SelfNodeId(),"seed",key.MPCSeed,"hash",txHash)
   415  	msg := protocol.MpcMessage{
   416  		ContextID:txHash,
   417  		Data:[]*protocol.MpcData{&protocol.MpcData{protocol.MpcTxHash,txHash[:]},
   418  			&protocol.MpcData{protocol.MpcAddress,address[:]},
   419  			&protocol.MpcData{protocol.MpcSeed,key.MPCSeed},},
   420  	}
   421  	go mpcServer.BoardcastMessage(nil,protocol.MSG_RequestPrepare,&msg)
   422  	return nil
   423  }
   424  
   425  func (mpcServer *MpcDistributor)UnlockKeystore(address common.MpcAddress, password string)error{
   426  	return mpcServer.mpcKeyStore.UnlockAddress(address,password)
   427  }
   428  
   429  func (mpcServer *MpcDistributor) loadStoremanAddress(address *common.MpcAddress) (*protocol.MpcValue,  error) {
   430  	log.Info("MpcDistributor.loadStoremanAddress begin","address",address)
   431  
   432  	mpcKey := mpcServer.mpcKeyStore.GetMpcKey(*address)
   433  	if mpcKey == nil {
    434  		return nil, errors.New("MpcDistributor loadStoremanAddress: mpc key not found")
   435  	}
   436  	return &protocol.MpcValue{protocol.MpcPrivateShare, mpcKey.PrivateKey}, nil
   437  }
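        // MpcAccountRequest assigns a distinct nonzero random seed to every
        // connected peer and starts an account-creation MPC with this node as leader.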
   438  func (mpcServer *MpcDistributor) MpcAccountRequest() (hexutil.Bytes, error) {
    439  	rand.Seed(time.Now().UnixNano())
   440  	peers := mpcServer.P2pMessager.GetPeers()
   441  	peerInfo := make([]protocol.PeerInfo,len(peers))
   442  	seedMap := make(map[uint64]bool)
   443  	seedMap[0] = true
   444  	for i:=0;i<len(peers);i++{
   445  		seed := rand.Uint64()
   446  		for ;; {
   447  			if _,exist := seedMap[seed];exist{
   448  				seed = rand.Uint64()
   449  			}else{
   450  				break
   451  			}
   452  		}
   453  		peerInfo[i].PeerID = peers[i]
   454  		peerInfo[i].Seed = seed
   455  		seedMap[peerInfo[i].Seed] = true
   456  	}
   457  	return mpcServer.createAccountRequestMpcContext(peerInfo)
   458  }
   459  
   460  func (mpcServer *MpcDistributor) createAccountRequestMpcContext(peers []protocol.PeerInfo) (hexutil.Bytes, error) {
    461  	log.Info("MpcDistributor createAccountRequestMpcContext begin")
    462  	seedMap := make(map[uint64]bool)
    463  	seedMap[0] = true
    464  	rand.Seed(time.Now().UnixNano())
   465  	for i:=0;i<len(peers);i++{
   466  		seed := rand.Uint64()
   467  		if _,exist := seedMap[seed];exist{
    468  			log.Error("createAccountRequestMpcContext: duplicate random seed, keeping previous value", "seed", seed)
   469  			continue
   470  		}
   471  		seedMap[seed] = true
   472  		peers[i].Seed = seed
   473  	}
   474  
    475  	randVal, _ := crypto.RandFieldElement(secp256k1.S256())
    476  	hash := common.BigToHash(randVal)
   477  	mpc, err := mpcServer.mpcCreater.CreateContext(protocol.MpcCreateLockAccountLeader, hash,
   478  		peers,mpcServer.SelfNodeId())
   479  	if err != nil {
    480  		log.Error("MpcDistributor createAccountRequestMpcContext, CreateContext fail", "err", err)
   481  		return []byte{}, err
   482  	}
   483  
    484  	log.Info("MpcDistributor createAccountRequestMpcContext", "mpcID", hash)
   485  
   486  	mpcServer.addMpcContext(hash, mpc)
   487  	defer mpcServer.removeMpcContext(hash)
   488  	err = mpc.mainMPCProcess(mpcServer)
   489  	if err != nil {
    490  		log.Error("MpcDistributor createAccountRequestMpcContext, mainMPCProcess fail", "err", err)
   491  		return []byte{}, err
   492  	}
   493  
   494  //	result := mpc.getMpcResult().(common.MpcAddress)
   495  
   496  //	log.Info("MpcDistributor createRequestMpcContext, succeed," ,"result", result)
   497  //	return result[:], nil
    498  	return []byte{}, nil
   499  }
   500  func (mpcServer *MpcDistributor)CreateRequestMpc(){
   501  	peer := mpcServer.GetPeerLeader(true)
   502  	if peer == nil{
   503  		return
   504  	}
   505  	log.Info("GetPeerLeader","peer",peer)
    506  	if *peer == *mpcServer.SelfNodeId(){
   507  		nodeInfoAry := mpcServer.nodeInfoMap.getEnoughNodeInfo()
   508  		for i:=0;i<len(nodeInfoAry);i++{
   509  			go func(node protocol.MpcNodeInterface){
   510  				_,ctxHash,err := mpcServer.createRequestMpcContext(*node.Hash(),*node.Address())
   511  				if err!= nil{
   512  					//node.SetState(protocol.MpcCollection)
   513  				}else{
   514  //					log.Error("createRequestMpcContext Successful","result",result)
   515  					node.SetState(protocol.MpcFinish)
   516  					protocol.TLog.Error("createRequestMpcContext Successful")
   517  					mpcMsg := &protocol.MpcMessage{ContextID: *ctxHash,
   518  						StepID: 5,
   519  						Data:[]*protocol.MpcData{&protocol.MpcData{protocol.MpcTxHash,*node.Hash()},
   520  							&protocol.MpcData{protocol.MpcAddress,*node.Address()}},
   521  					}
   522  					go mpcServer.BoardcastMessage(nil, protocol.MSG_MPCFinish, mpcMsg)
   523  				}
   524  			}(nodeInfoAry[i])
   525  		}
   526  	}
   527  }
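        // createRequestMpcContext runs a signing MPC as leader: it looks up the
        // prepared node info, randomly trims the peer set down to the required
        // quorum (always keeping itself), loads the local private share and
        // drives the context to completion under a freshly generated context ID.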
   528  func (mpcServer *MpcDistributor) createRequestMpcContext(hash common.Hash, from common.MpcAddress) ([]byte,*common.Hash, error) {
   529  //	log.Error("MpcDistributor createRequestMpcContext begin","hash",hash.String())
   530  	nodeInfo := mpcServer.nodeInfoMap.getNodeInfo(hash,from)
   531  	if nodeInfo == nil{
    532  		return []byte{},nil, errors.New("MpcDistributor createRequestMpcContext: node info not found")
   533  	}
   534  	seed := nodeInfo.GetSeed(mpcServer.SelfNodeId())
   535  	if seed == 0{
    536  		return []byte{},nil, errors.New("MpcDistributor createRequestMpcContext: no seed for self node")
   537  	}
   538  	peers := nodeInfo.GetPeers()
   539  	if len(peers) > nodeInfo.NeedQuorum(){
   540  		rand.Seed(time.Now().UnixNano())
   541  		nNum := len(peers)
   542  		for ;nNum>nodeInfo.NeedQuorum(); {
   543  			index := rand.Int()%nNum
   544  			if peers[index].Seed != seed{
   545  				if index!=nNum-1 {
   546  					peers[index],peers[nNum-1] = peers[nNum-1],peers[index]
   547  				}
   548  				nNum--
   549  			}
   550  		}
   551  		peers = peers[:nodeInfo.NeedQuorum()]
   552  	}
   553  	preSetValue := []protocol.MpcValue{protocol.MpcValue{protocol.MpcTxHash, hash[:]},
   554  		protocol.MpcValue{protocol.MpcAddress, from[:]}}
   555  
   556  	value, err := mpcServer.loadStoremanAddress(&from)
   557  	if err != nil {
   558  		log.Error("MpcDistributor createRequestMpcContext, loadStoremanAddress fail.","address",from, "error", err)
   559  		return []byte{},nil, err
   560  	}
   561  
   562  	preSetValue = append(preSetValue, *value)
    563  	randVal, _ := crypto.RandFieldElement(secp256k1.S256())
    564  	ctxhash := common.BigToHash(randVal)
   565  	mpc, err := mpcServer.mpcCreater.CreateContext(protocol.MpcTXSignLeader, ctxhash,peers,mpcServer.SelfNodeId(),  preSetValue...)
   566  	if err != nil {
    567  		log.Error("MpcDistributor createRequestMpcContext, CreateContext fail", "err", err)
   568  		return []byte{},nil, err
   569  	}
   570  
   571  	log.Info("MpcDistributor createRequestMpcContext","mpcID", ctxhash)
   572  
   573  	mpcServer.addMpcContext(ctxhash, mpc)
   574  	defer mpcServer.removeMpcContext(ctxhash)
   575  	err = mpc.mainMPCProcess(mpcServer)
   576  	if err != nil {
   577  		log.Error("MpcDistributor createRequestMpcContext, mainMPCProcess fail","error", err)
   578  		return []byte{},&ctxhash, err
   579  	}
   580  
   581  
   582  	return []byte{},&ctxhash, nil
   583  }
   584  
   585  func (mpcServer *MpcDistributor) QuitMpcContext(msg *protocol.MpcMessage) {
   586  //	log.Error("QuitMpcContext","contextId",msg.ContextID,"error",msg.Error)
   587  	mpc := mpcServer.mpcMap.getMpcCtx(msg.ContextID)
   588  	if mpc != nil {
   589  		mpc.quit(errors.New(msg.Error))
   590  	}
   591  }
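        // GetPeerLeader picks the leader for the current MpcInterval epoch by
        // sorting the peer list and indexing it with (second / MpcInterval) modulo
        // the peer count; when bRequest is set it returns nil near an epoch
        // boundary so leadership is not exercised while it may be changing.
        //
        // Example: with MpcInterval = 30 and 4 sorted peers, seconds 0-29 map to
        // epoch 0 (peers[0]) and seconds 30-59 to epoch 1 (peers[1]).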
   592  func (mpcServer *MpcDistributor) GetPeerLeader(bRequest bool)*discover.NodeID{
   593  	second := time.Now().Second()
   594  	if bRequest{
   595  		if second%MpcInterval <=1 || second%MpcInterval+1>=MpcInterval{
   596  			return nil
   597  		}
   598  	}
   599  	epoch := second/MpcInterval
    600  	peers := mpcServer.P2pMessager.GetPeers()
         	if len(peers) == 0 {
         		return nil
         	}
    601  	sort.Sort(protocol.SlicePeers(peers))
   602  	index := epoch%len(peers)
   603  	return peers[index]
   604  
   605  }
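        // handleMpcPrepqare records a peer's seed for a prepared signing request,
        // provided this node holds the MPC key for the advertised address.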
   606  func (mpcServer *MpcDistributor) handleMpcPrepqare(peerId *discover.NodeID,mpcMessage *protocol.MpcMessage){
   607  	log.Info("MpcDistributor handleMpcPrepqare begin")
   608  	var seed uint64
   609  	var address common.MpcAddress
   610  	n := 0
   611  	for _,data := range mpcMessage.Data {
   612  		if data.Key == protocol.MpcSeed {
   613  			seed = data.Data.(uint64)
   614  			n++
   615  			if(n>=2){
   616  				break
   617  			}
   618  		}else if data.Key == protocol.MpcAddress{
   619  			address = common.BytesToMpcAddress(data.Data.([]byte))
   620  			n++
   621  			if(n>=2){
   622  				break
   623  			}
   624  		}
   625  	}
   626  	protocol.TLog.Error("handleMpcPrepqare,prepqare-from=%s,prepqare-to=%s,prepqare-seed=%v",peerId.TerminalString(),
   627  		mpcServer.SelfNodeId().TerminalString(),seed)
    628  	if seed == 0 {
    629  		log.Error("handleMpcPrepqare requires a nonzero seed")
   630  		return
   631  	}
   632  	peers := mpcServer.P2pMessager.GetPeers()
   633  	key := mpcServer.mpcKeyStore.GetMpcKey(address)
   634  
   635  	if key == nil{
    636  		log.Error("handleMpcPrepqare: no mpc key found for requested address", "address", address)
   637  		return
   638  	}
   639  	err := mpcServer.nodeInfoMap.addNodeInfo(mpcMessage.ContextID,seed,peerId,peers,nil,key)
   640  	if err != nil {
   641  		log.Error(err.Error())
   642  	}
   643  	/*
   644  	peer := mpcServer.GetPeerLeader()
   645  	log.Info("GetPeerLeader","peer",peer)
   646  	enough,err := mpcServer.nodeInfoMap.addNodeInfo(mpcMessage.ContextID,seed,peerId,peers,nil,key)
   647  	if err != nil {
   648  		log.Error(err.Error())
   649  	}else if *peer == *mpcServer.SelfNodeId(){
   650  		log.Info("Mpc Leader Info:","Enough",enough,"Error",err)
   651  		if enough{
   652  			if requestType == 0{
   653  				peers := mpcServer.nodeInfoMap.getPeerInfo(mpcMessage.ContextID)
   654  				go mpcServer.createAccountRequestMpcContext(peers)
   655  			}else{
   656  				go func() {
   657  					_,err := mpcServer.createRequestMpcContext(mpcMessage.ContextID,address)
   658  					if err != nil{
   659  						log.Error("createRequestMpcContext","Error",err)
   660  					}
   661  				}()
   662  			}
   663  		}
   664  	}
   665  	*/
   666  }
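        // createMpcContext handles an inbound MSG_RequestMPC: for signing requests
        // it verifies the sender is the expected leader, checks that this node was
        // prepared for the request and is in the peer list, and loads the local
        // private share; for account creation it simply builds the peer-side
        // context. In both cases the context runs in its own goroutine.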
   667  func (mpcServer *MpcDistributor) createMpcContext(mpcMessage *protocol.MpcMessage,leader discover.NodeID, preSetValue ...protocol.MpcValue) error {
   668  	log.Info("MpcDistributor createMpcContext begin")
   669  	if mpcServer.mpcMap.hasMpcCtx(mpcMessage.ContextID) {
   670  //		log.Error("createMpcContext fail. err:%s", protocol.ErrMpcContextExist.Error())
   671  		return protocol.ErrMpcContextExist
   672  	}
   673  	var peers []protocol.PeerInfo
   674  	if len(mpcMessage.Peers)>0 {
   675  		rlp.DecodeBytes(mpcMessage.Peers,&peers)
   676  	}
   677  	var ctxType uint
   678  	if len(mpcMessage.Data) == 0{
   679  		ctxType = protocol.MpcCreateLockAccountPeer
   680  	} else {
   681  		ctxType = protocol.MpcTXSignPeer
   682  	}
   683  
   684  	log.Info("createMpcContext","ctxType", ctxType, "ctxId", mpcMessage.ContextID)
   685  	if ctxType == protocol.MpcTXSignPeer {
   686  		log.Info("createMpcContext MpcTXSignPeer")
    687  		expectedLeader := mpcServer.GetPeerLeader(false)
         		if expectedLeader == nil || leader != *expectedLeader {
    688  			return errors.New("mpc leader check failed")
    689  		}
   690  		address := common.MpcAddress{}
   691  		txHash := common.Hash{}
   692  		for _,item := range mpcMessage.Data{
   693  			preSetValue = append(preSetValue, protocol.MpcValue{item.Key,item.Data})
   694  			if item.Key == protocol.MpcAddress {
   695  				address = common.BytesToMpcAddress(item.Data.([]byte))
   696  			}else if item.Key == protocol.MpcTxHash {
   697  				txHash = common.BytesToHash(item.Data.([]byte))
   698  			}
   699  
   700  		}
   701  		bHave := false
   702  		for i:=0;i<len(peers);i++{
   703  			if *peers[i].PeerID == *mpcServer.SelfNodeId(){
   704  				bHave = true
   705  				break
   706  			}
   707  		}
   708  		if !bHave{
   709  			mpcServer.nodeInfoMap.ChangeMpcState(txHash,address,protocol.MpcRunning)
   710  			return protocol.ErrMpcContextExist
   711  		}
   712  
   713  		// load account
   714  		MpcPrivateShare, err := mpcServer.loadStoremanAddress(&address)
   715  		if err != nil {
   716  			return err
   717  		}
   718  		mpcKey := mpcServer.mpcKeyStore.GetMpcKey(address)
   719  		state := mpcServer.nodeInfoMap.RunContextNodeID(txHash,address,mpcKey.MPCSeed,mpcServer.SelfNodeId())
   720  		if state == protocol.MpcNotFound{
    721  			return errors.New("mpc server has not prepared for this mpc request")
   722  		}else if state == protocol.MpcFinish{
   723  			return protocol.ErrMpcFinish
   724  		}else if state == protocol.MpcRunning{
   725  			return protocol.ErrMpcContextExist
   726  		}
   727  		preSetValue = append(preSetValue, *MpcPrivateShare)
   728  	}
   729  //	log.Error("CreateContext","leader",leader)
   730  	mpc, err := mpcServer.mpcCreater.CreateContext(ctxType, mpcMessage.ContextID,peers,&leader,preSetValue...)
   731  	if err != nil {
    732  		log.Error("createMpcContext, createContext fail", "err", err)
   733  		return err
   734  	}
   735  
   736  	go func() {
   737  		mpcServer.addMpcContext(mpcMessage.ContextID, mpc)
   738  		defer mpcServer.removeMpcContext(mpcMessage.ContextID)
    739  		if err := mpc.mainMPCProcess(mpcServer); err != nil {
         			log.Error("createMpcContext, mainMPCProcess fail", "err", err)
         		}
   740  	}()
   741  
   742  	return nil
   743  }
   744  
   745  func (mpcServer *MpcDistributor) addMpcContext(hash common.Hash, mpc MpcInterface) {
   746  	log.Info("addMpcContext. ","ctxId", hash)
   747  
   748  	mpc.SubscribeResult(mpcServer.result)
   749  	mpcServer.mpcMap.setMpcCtx(hash,mpc)
   750  //	mpcServer.nodeInfoMap.setMpcState(hash,protocol.MpcRunning)
   751  }
   752  
   753  func (mpcServer *MpcDistributor) removeMpcContext(hash common.Hash) {
   754  	log.Info("removeMpcContext. ","ctxId", hash)
   755  	mpcServer.mpcMap.removeMpcCtx(hash)
   756  }
   757  
   758  func (mpcServer *MpcDistributor) getMpcMessage(PeerID *discover.NodeID, mpcMessage *protocol.MpcMessage) error {
   759  	log.Info("getMpcMessage.", "peerid", PeerID,"ctxId",mpcMessage.ContextID,  "stepID", mpcMessage.StepID)
   760  
   761  	mpc := mpcServer.mpcMap.getMpcCtx(mpcMessage.ContextID)
   762  	if mpc != nil {
   763  		return mpc.getMessage(PeerID, mpcMessage)
   764  	}
   765  
   766  	return nil
   767  }
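        // FinishMpc marks a request's node info as finished; a StepID of zero is
        // the leader's broadcast and additionally quits any local context still running.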
   768  func (mpcServer *MpcDistributor) FinishMpc(PeerID *discover.NodeID, mpcMessage *protocol.MpcMessage) error {
   769  //	log.Error("FinishMpc.", "peerid", PeerID,"ctxId",mpcMessage.ContextID)
   770  
   771  	if mpcMessage.StepID == 0{
   772  //		log.Error("FinishMpc.")
   773  		hash,addr,have := GetAddressAndHash(mpcMessage)
   774  		if have {
   775  			mpcServer.nodeInfoMap.setMpcState(hash,addr,protocol.MpcFinish)
   776  			mpc := mpcServer.mpcMap.getMpcCtx(mpcMessage.ContextID)
   777  			if mpc != nil {
   778  				mpc.quit(protocol.ErrMpcContextExist)
   779  			}
   780  		}
   781  	}else{
   782  		mpc := mpcServer.mpcMap.getMpcCtx(mpcMessage.ContextID)
   783  		if mpc == nil {
   784  			hash,addr,have := GetAddressAndHash(mpcMessage)
   785  			if have {
   786  				mpcServer.nodeInfoMap.setMpcState(hash,addr, protocol.MpcFinish)
   787  			}
   788  		}
   789  	}
   790  
   791  	return nil
   792  }
   793  
   794  func (mpcServer *MpcDistributor) getOwnerP2pMessage(PeerID *discover.NodeID, code uint64, msg interface{}) error {
   795  	switch code {
   796  	case protocol.MSG_MPCMessage:
   797  		mpcMessage := msg.(*protocol.MpcMessage)
   798  		mpcServer.getMpcMessage(PeerID, mpcMessage)
   799  	case protocol.MSG_RequestPrepare:
   800  //		mpcMessage := msg.(*protocol.MpcMessage)
   801  //		go mpcServer.handleMpcPrepqare(PeerID,mpcMessage)
   802  	case protocol.MSG_RequestMPC:
   803  		// do nothing
   804  	}
   805  
   806  	return nil
   807  }
   808  
   809  func (mpcServer *MpcDistributor) SelfNodeId() *discover.NodeID {
   810  	return &mpcServer.P2pMessager.Self().ID
   811  }
   812  
   813  func (mpcServer *MpcDistributor) P2pMessage(peerID *discover.NodeID, code uint64, msg interface{}) error {
   814  	if *peerID == *mpcServer.SelfNodeId() {
   815  		mpcServer.getOwnerP2pMessage(mpcServer.SelfNodeId(), code, msg)
   816  	} else {
   817  		err := mpcServer.P2pMessager.SendToPeer(peerID, code, msg)
   818  		if err != nil {
    819  			log.Error("P2pMessage SendToPeer fail", "err", err)
   820  		}
   821  	}
   822  
   823  	return nil
   824  }
   825  
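        // BoardcastMessage sends msg to the given peers (or to every connected
        // peer when peers is nil), looping messages addressed to this node back
        // through getOwnerP2pMessage instead of the wire.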
   826  func (mpcServer *MpcDistributor) BoardcastMessage(peers []*discover.NodeID, code uint64, msg interface{}) error {
   827  	if peers == nil {
   828  		peers = mpcServer.P2pMessager.GetPeers()
   829  //		if code != 4 && code != 7 {
   830  //			log.Error("----------------------BoardcastMessage","code",code)
   831  //		}
   832  	}
   833  	for i:=0;i<len(peers);i++ {
   834  		if *peers[i] == *mpcServer.SelfNodeId() {
   835  			mpcServer.getOwnerP2pMessage(mpcServer.SelfNodeId(), code, msg)
   836  		} else {
   837  			err := mpcServer.P2pMessager.SendToPeer(peers[i], code, msg)
   838  			if err != nil {
    839  				log.Error("BoardcastMessage fail", "peer", peers[i], "error", err)
   840  			}
   841  		}
   842  	}
   843  
   844  	return nil
   845  }
   846  
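        // CreateKeystore persists the outcome of an account-creation MPC: it
        // rebuilds the group public key from the shared result, stores the local
        // private share with this node's seed and group layout, and records the
        // derived account as MpcContextResult.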
   847  func (mpcServer *MpcDistributor) CreateKeystore(result protocol.MpcResultInterface,nodeInfo protocol.MpcNodeInterface) error {
   848  	log.Info("MpcDistributor.CreateKeystore begin")
   849  	point, err := result.GetValue(protocol.PublicKeyResult)
   850  	if err != nil {
   851  		log.Error("CreateKeystore fail. get PublicKeyResult fail")
   852  		return err
   853  	}
   854  
   855  	private, err := result.GetValue(protocol.MpcPrivateShare)
   856  	if err != nil {
   857  		log.Error("CreateKeystore fail. get MpcPrivateShare fail")
   858  		return err
   859  	}
   860  	pub := point.([2]*big.Int)
   861  	result1 := new(ecdsa.PublicKey)
   862  	result1.Curve = secp256k1.S256()
   863  	result1.X = pub[0]
   864  	result1.Y = pub[1]
   865  	seed := nodeInfo.GetSeed(mpcServer.SelfNodeId())
   866  	peers := nodeInfo.GetPeers()
   867  	mpcGroup := make([]uint64,len(peers))
   868  	for i,peer := range peers{
   869  		mpcGroup[i] = peer.Seed
   870  	}
   871  	account,err := mpcServer.mpcKeyStore.StoreMpcKey(result1,private.(*big.Int),seed,uint64(protocol.MPCDegree*2+1),mpcGroup,mpcServer.password)
   872  	if err != nil {
   873  		return err
   874  	}
   875  //	log.Error("MpcCreateKeystore","address",account)
   876  	result.SetValue(protocol.MpcContextResult, account[:])
   877  	return nil
   878  }
   879  
   880  func (mpcServer *MpcDistributor) SignTransaction(result protocol.MpcResultInterface) error {
   881  	return nil
   882  
   883  }