github.com/sixexorg/magnetic-ring@v0.0.0-20191119090307-31705a21e419/consense/dpoa/store.go

package dpoa

import (
	"fmt"
	"sync"
	"time"

	"github.com/ontio/ontology-eventbus/actor"
	"github.com/sixexorg/magnetic-ring/consense/dpoa/comm"
	"github.com/sixexorg/magnetic-ring/log"
	"github.com/sixexorg/magnetic-ring/node"
	"github.com/sixexorg/magnetic-ring/radar/mainchain"
	"github.com/sixexorg/magnetic-ring/store/mainchain/storages"
	"github.com/sixexorg/magnetic-ring/store/mainchain/validation"
)

// BlockStore caches recently sealed blocks, persists them to the main-chain
// ledger and tracks epoch boundaries together with the stars that failed to
// sign during an epoch.
type BlockStore struct {
	sync.RWMutex

	db            *storages.LedgerStoreImp
	cacheLen      uint64
	pendingBlocks map[uint64]*comm.Block
	epochEnd      uint64
	accountStr    string
	gesisBlk      *comm.Block
	failersMap    map[uint64]map[string]struct{}
	mainRadar     *mainchain.LeagueConsumers
	p2pActor      *actor.PID
	curHeight     uint64
}

// NewBlockStore loads the genesis block from the ledger and returns an
// initialized BlockStore for the given account.
func NewBlockStore(db *storages.LedgerStoreImp, acc string, p2pActor *actor.PID) (*BlockStore, error) {
	bk, err := db.GetBlockByHeight(1)
	if err != nil {
		return nil, err
	}
	genesis, err := comm.InitVbftBlock(bk)
	if err != nil {
		return nil, err
	}
	return &BlockStore{
		db:            db,
		cacheLen:      500,
		gesisBlk:      genesis,
		accountStr:    acc,
		epochEnd:      1,
		pendingBlocks: make(map[uint64]*comm.Block),
		failersMap:    make(map[uint64]map[string]struct{}),
		mainRadar:     mainchain.GetLeagueConsumersInstance(),
		p2pActor:      p2pActor,
		curHeight:     db.GetCurrentBlockHeight(),
	}, nil
}

// getGeisisBlock returns the cached genesis block.
func (pool *BlockStore) getGeisisBlock() (*comm.Block, error) {
	pool.RLock()
	defer pool.RUnlock()

	return pool.gesisBlk, nil
}

// getSealedBlock returns the block at blockNum, preferring the in-memory
// cache over the ledger.
func (pool *BlockStore) getSealedBlock(blockNum uint64) (*comm.Block, error) {
	pool.RLock()
	defer pool.RUnlock()
	if blk, present := pool.pendingBlocks[blockNum]; present {
		return blk, nil
	}
	block, err := pool.db.GetBlockByHeight(blockNum)
	if err != nil {
		return nil, err
	}
	return comm.InitVbftBlock(block)
}

// getLatestBlock returns the block at the ledger's current height, preferring
// the in-memory cache.
func (pool *BlockStore) getLatestBlock() (*comm.Block, error) {
	pool.RLock()
	defer pool.RUnlock()
	blockNum := pool.db.GetCurrentBlockHeight()
	if blk, present := pool.pendingBlocks[blockNum]; present {
		return blk, nil
	}
	block, err := pool.db.GetBlockByHeight(blockNum)
	if err != nil {
		return nil, err
	}

	return comm.InitVbftBlock(block)
}

func (pool *BlockStore) getLatestBlockNumber() uint64 {
	pool.RLock()
	defer pool.RUnlock()

	return pool.db.GetCurrentBlockHeight()
}

// GetBlock returns the block at blknum. getSealedBlock acquires the read lock
// itself, so GetBlock must not hold it as well: a nested RLock can deadlock
// once a writer is queued between the two acquisitions.
func (pool *BlockStore) GetBlock(blknum uint64) (*comm.Block, error) {
	return pool.getSealedBlock(blknum)
}

// setBlockSealed persists sealed blocks to the ledger. The block is cached in
// pendingBlocks, then every consecutive cached block starting from the next
// ledger height is validated, checked against the main-chain radar and saved.
// The write lock is held for the whole call, so the ledger height is read via
// pool.db.GetCurrentBlockHeight() directly rather than getLatestBlockNumber,
// which would try to re-acquire the lock.
func (pool *BlockStore) setBlockSealed(block *comm.Block) error {
	pool.Lock()
	defer pool.Unlock()

	log.Info("setBlockSealed", "block", block)

	if block == nil {
		return fmt.Errorf("try add nil block")
	}

	if block.GetBlockNum() <= pool.db.GetCurrentBlockHeight() {
		log.Warn("chain store adding chained block", "blockNum", block.GetBlockNum(), "latest", pool.db.GetCurrentBlockHeight())
		return nil
	}
	if block.Block.Header == nil {
		panic("nil block header")
	}
	log.Info("func dpoa store setBlockSealed", "blockHeight", block.Block.Header.Height, "txlen", block.Block.Transactions.Len())
	log.Info("func dpoa setBlockSealed 01", "blockHeight", block.Block.Header.Height, "txlen", block.Block.Transactions.Len())
	pool.pendingBlocks[block.GetBlockNum()] = block

	blkNum := pool.db.GetCurrentBlockHeight() + 1
	log.Info("func dpoa setBlockSealed 02", "realBlockHeight", pool.db.GetCurrentBlockHeight())
	for {
		blk, present := pool.pendingBlocks[blkNum]
		log.Info("func dpoa setBlockSealed 03", "isnil", blk == nil, "present", present)
		if !present || blk == nil {
			break
		}
		log.Info("func dpoa setBlockSealed 04", "blockHeight", blk.Block.Header.Height, "txlen", blk.Block.Transactions.Len())
		blkInfo, err := validation.ValidateBlock(blk.Block, pool.db)
		if err != nil {
			log.Error("func dpoa setBlockSealed 05 validateBlock failed", "err", err)
			return err
		}
		if blkInfo.ObjTxs.Len() > 0 {
			// league transactions must be confirmed by the main-chain radar
			// before the block can be persisted
			fmt.Println("🔯 consensus CheckLeaguesWithBoolResponse start")
			err = pool.mainRadar.CheckLeaguesWithBoolResponse(blkInfo.ObjTxs, time.Second*5)
			if err != nil {
				log.Error("func dpoa setBlockSealed 06 CheckLeaguesWithBoolResponse failed", "err", err)
				fmt.Println("🔯 consensus save failed", err, " 🌐 🔐 p2p sync next height unlocked")
				return err
			}
		}
		err = pool.db.SaveAll(blkInfo)
		if err != nil && blkNum > pool.db.GetCurrentBlockHeight() {
			return fmt.Errorf("ledger add blk (%d, %d) failed: %s", blkNum, pool.db.GetCurrentBlockHeight(), err)
		}
		if blkNum != pool.db.GetCurrentBlockHeight() {
			log.Error("chain store added chained block", "blockNum", blkNum, "latest", pool.db.GetCurrentBlockHeight(), "err", err)
		}

		delete(pool.pendingBlocks, blkNum)
		blkNum++
	}
	return nil
}

// onBlockSealed caches the sealed block, trims cache entries that fall out of
// the cacheLen window and records the end of an epoch when the block was
// proposed by the earth node.
func (pool *BlockStore) onBlockSealed(blockNum uint64) {
	pool.Lock()
	defer pool.Unlock()

	// blockNum and cacheLen are unsigned, so compare before subtracting to
	// avoid wrapping around at early heights.
	if blockNum > pool.cacheLen {
		toFree := make([]uint64, 0)
		for blkNum := range pool.pendingBlocks {
			if blkNum < blockNum-pool.cacheLen {
				toFree = append(toFree, blkNum)
			}
		}
		for _, blkNum := range toFree {
			delete(pool.pendingBlocks, blkNum)
		}
	}

	var blkData *comm.Block
	if v, ok := pool.pendingBlocks[blockNum]; ok {
		blkData = v
	} else {
		block, err := pool.db.GetBlockByHeight(blockNum)
		if err != nil {
			return
		}
		blkData, err = comm.InitVbftBlock(block)
		if err != nil {
			log.Error("BlockStore onBlockSealed InitVbftBlock", "err", err)
			return
		}
		pool.pendingBlocks[blockNum] = blkData
		log.Info("func dpoa store onBlockSealed", "blockHeight", blkData.Block.Header.Height, "txlen", blkData.Block.Transactions.Len())
	}
	log.Info("onBlockSealed persist block", "miner", blkData.GetProposer(), "blocknum", blkData.GetBlockNum(), "hash", blkData.Block.Hash().String())
	if blkData.GetProposer() == pool.earthNode() {
		pool.epochEnd = blockNum
	}
}

// EpochBegin returns the height of the most recent block sealed by the earth
// node (stored as epochEnd).
func (pool *BlockStore) EpochBegin() uint64 {
	pool.RLock()
	defer pool.RUnlock()

	return pool.epochEnd
}

// GetCurStars returns the current star node list from the node package.
func (pool *BlockStore) GetCurStars() []string {
	return node.GurStars()
}

// isEarth reports whether the local account is the current earth node.
func (pool *BlockStore) isEarth() bool {
	pool.RLock()
	defer pool.RUnlock()

	return pool.earthNode() == pool.accountStr
}

func (pool *BlockStore) earthNode() string {
	return node.CurEarth()
}

// computeFairs walks the blocks in the range (preEpbegin, epEnd) and collects
// the public keys of the stars that either missed their view entirely or did
// not contribute a signature to the block sealed in their view.
func (pool *BlockStore) computeFairs(preEpbegin, epEnd uint64, curStars []string) []string {
	failers := make([]string, 0)
	if preEpbegin == 0 {
		return failers
	}
	m, _ := CalcStellar(float64(len(curStars)))
	for i := epEnd - 1; i > preEpbegin; i-- {
		if blk, err := pool.db.GetBlockByHeight(i); err == nil {
			curBlock, _ := comm.InitVbftBlock(blk)
			if i == preEpbegin+1 {
				// every view before the sealed one failed
				for j := 0; j < int(curBlock.GetViews()); j++ {
					failers = append(failers, curStars[j*m:(j+1)*m]...)
				}
			} else {
				pre, _ := pool.db.GetBlockByHeight(i - 1)
				preBlock, _ := comm.InitVbftBlock(pre)
				// views between the previous block's view and this block's
				// view produced nothing
				for j := preBlock.GetViews() + 1; j < curBlock.GetViews(); j++ {
					failers = append(failers, curStars[int(j)*m:(int(j)+1)*m]...)
				}
			}
			// stars of the sealed view that did not sign the block
			for _, pubKey := range curStars[int(curBlock.GetViews())*m : int(curBlock.GetViews()+1)*m] {
				var isFound bool
				for _, data := range curBlock.Block.Sigs.ProcSigs {
					p, _ := GetNode(curStars, int(byte2Int(data[0:2])))
					if pubKey == p {
						isFound = true
						break
					}
				}
				if !isFound {
					failers = append(failers, pubKey)
				}
			}
		}
	}

	return failers
}

// preEpheight scans backwards from epBegin for the most recent block proposed
// by the earth node and returns its height, or 0 if none is found.
func (pool *BlockStore) preEpheight(epBegin uint64) uint64 {
	for i := epBegin; i > 0; i-- {
		block, err := pool.db.GetBlockByHeight(i)
		if err != nil {
			continue
		}
		bk, err := comm.InitVbftBlock(block)
		if err != nil {
			continue
		}
		if bk.GetProposer() == pool.earthNode() {
			return bk.GetBlockNum()
		}
	}
	return 0
}

// inFailers reports whether pub failed to participate in the epoch ending at
// epEnd. The failer set for an epoch is computed once and cached.
func (pool *BlockStore) inFailers(epEnd uint64, pub string) bool {
	pool.Lock()
	defer pool.Unlock()

	if _, ok := pool.failersMap[epEnd]; !ok {
		if pool.epochEnd < epEnd {
			return false
		}
		curStars := pool.GetCurStars()
		pool.failersMap[epEnd] = make(map[string]struct{})
		for _, p := range pool.computeFairs(pool.preEpheight(epEnd-1), epEnd, curStars) {
			pool.failersMap[epEnd][p] = struct{}{}
		}
	}
	_, ok := pool.failersMap[epEnd][pub]
	return ok
}

func (pool *BlockStore) close() {
}

// sealBlock accepts the block for the next expected height and persists it via
// setBlockSealed. A block for a past round is ignored; a block for a future
// round returns an error so the caller can restart syncing.
func (pool *BlockStore) sealBlock(block *comm.Block) error {
	log.Info("func dpoa store sealBlock", "blockHeight", block.Block.Header.Height, "txlen", block.Block.Transactions.Len())
	sealedBlkNum := block.GetBlockNum()
	if sealedBlkNum < pool.getLatestBlockNumber()+1 {
		// we are already in a future round
		log.Error("late seal", "sealedBlkNum", sealedBlkNum, "expected", pool.getLatestBlockNumber()+1)
		return nil
	} else if sealedBlkNum > pool.getLatestBlockNumber()+1 {
		// we have lost sync; the caller should restart syncing
		return fmt.Errorf("future seal of %d, current blknum: %d", sealedBlkNum, pool.getLatestBlockNumber()+1)
	}

	if err := pool.setBlockSealed(block); err != nil {
		return fmt.Errorf("failed to seal block: %s", err)
	}

	blk, err := pool.getSealedBlock(sealedBlkNum)
	if err != nil {
		return fmt.Errorf("failed to load sealed block %d: %s", sealedBlkNum, err)
	}
	h := blk.Block.Hash()
	prevBlkHash := block.GetPrevBlockHash()
	log.Info("func dpoa store sealBlock server sealed block", "accountStr", pool.accountStr,
		"sealedBlkNum", sealedBlkNum, "proposer", block.GetProposer(), "prevhash", prevBlkHash, "hash", h)

	return nil
}

// nonConsensusNode reports whether the local account is absent from the
// current star list.
func (pool *BlockStore) nonConsensusNode() bool {
	for _, v := range pool.GetCurStars() {
		if pool.accountStr == v {
			return false
		}
	}
	return true
}
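
// exampleSealFlow is an illustrative sketch, not part of the original source.
// It shows the expected call order around BlockStore, assuming the caller
// already has a ledger instance, the local account string and the p2p actor
// PID, and uses only the functions defined above in this file.
func exampleSealFlow(db *storages.LedgerStoreImp, acc string, p2pActor *actor.PID, blk *comm.Block) error {
	store, err := NewBlockStore(db, acc, p2pActor)
	if err != nil {
		return err
	}
	// sealBlock persists the block once it matches the next expected height;
	// a block for a future height returns an error so the caller can trigger
	// re-syncing before retrying.
	if err := store.sealBlock(blk); err != nil {
		return err
	}
	// onBlockSealed caches the sealed block, prunes entries older than the
	// cacheLen window and records the epoch end if the proposer was the
	// earth node.
	store.onBlockSealed(blk.GetBlockNum())
	return nil
}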