github.com/deroproject/derosuite@v2.1.6-1.0.20200307070847-0f2e589c7a2b+incompatible/blockchain/blockchain.go (about)

     1  // Copyright 2017-2018 DERO Project. All rights reserved.
     2  // Use of this source code in any form is governed by RESEARCH license.
     3  // license can be found in the LICENSE file.
     4  // GPG: 0F39 E425 8C65 3947 702A  8234 08B2 0360 A03A 9DE8
     5  //
     6  //
     7  // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
     8  // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
     9  // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
    10  // THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    11  // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
    12  // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
    13  // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
    14  // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
    15  // THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    16  
    17  package blockchain
    18  
    19  // This file runs the core consensus protocol
    20  // please think before randomly editing for after effects
    21  // We must not call any packages that can call panic
    22  // NO Panics or FATALs please
    23  
    24  import "os"
    25  import "fmt"
    26  import "sort"
    27  import "sync"
    28  import "bufio"
    29  import "time"
    30  import "bytes"
    31  import "runtime"
    32  import "math/big"
    33  import "sync/atomic"
    34  import "runtime/debug"
    35  
    36  import "golang.org/x/crypto/sha3"
    37  import "github.com/romana/rlog"
    38  import log "github.com/sirupsen/logrus"
    39  import "github.com/golang/groupcache/lru"
    40  import hashicorp_lru "github.com/hashicorp/golang-lru"
    41  
    42  import "github.com/deroproject/derosuite/config"
    43  import "github.com/deroproject/derosuite/crypto"
    44  import "github.com/deroproject/derosuite/errormsg"
    45  import "github.com/prometheus/client_golang/prometheus"
    46  
    47  //import "github.com/deroproject/derosuite/address"
    48  import "github.com/deroproject/derosuite/emission"
    49  import "github.com/deroproject/derosuite/globals"
    50  import "github.com/deroproject/derosuite/storage"
    51  import "github.com/deroproject/derosuite/crypto/ringct"
    52  import "github.com/deroproject/derosuite/block"
    53  import "github.com/deroproject/derosuite/transaction"
    54  import "github.com/deroproject/derosuite/checkpoints"
    55  import "github.com/deroproject/derosuite/metrics"
    56  import "github.com/deroproject/derosuite/blockchain/mempool"
    57  import "github.com/deroproject/derosuite/blockchain/inputmaturity"
    58  
// Blockchain is the core consensus object; all components requiring access
// to the chain must communicate through this struct.
// Fields must only be updated while holding the embedded RWMutex.
type Blockchain struct {
	store       storage.Store // interface to storage layer (Bolt or Badger backend)
	Height      int64         // chain height is always 1 more than block
	height_seen int64         // height seen on peers
	Top_ID      crypto.Hash   // id of the top block
	//Tips              map[crypto.Hash]crypto.Hash // current tips
	dag_unsettled              map[crypto.Hash]bool // current unsettled dag
	dag_past_unsettled_cache   *lru.Cache
	dag_future_unsettled_cache *lru.Cache
	lrucache_workscore         *lru.Cache // temporary cache for work score calculation
	lrucache_fullorder         *lru.Cache // keeps full order for  tips upto a certain height

	MINING_BLOCK bool // used to pause mining while a block is being added

	Difficulty        uint64 // current cumulative difficulty
	Median_Block_Size uint64 // current median block size
	Mempool           *mempool.Mempool
	Exit_Event        chan bool // blockchain is shutting down and we must quit ASAP

	Top_Block_Median_Size uint64 // median block size of current top block
	Top_Block_Base_Reward uint64 // top block base reward

	checkpints_disabled bool // are checkpoints disabled
	simulator           bool // is simulator mode (hard coded difficulty of 1)

	P2P_Block_Relayer func(*block.Complete_Block, uint64) // tell p2p to broadcast any block this daemon hash found

	sync.RWMutex
}
    90  
    91  var logger *log.Entry
    92  
    93  //var Exit_Event = make(chan bool) // causes all threads to exit
    94  
    95  // All blockchain activity is store in a single
    96  
    97  /* do initialisation , setup storage, put genesis block and chain in store
    98     This is the first component to get up
    99     Global parameters are picked up  from the config package
   100  */
   101  
   102  func Blockchain_Start(params map[string]interface{}) (*Blockchain, error) {
   103  
   104  	var err error
   105  	var chain Blockchain
   106  
   107  	logger = globals.Logger.WithFields(log.Fields{"com": "BLKCHAIN"})
   108  	logger.Infof("Initialising blockchain")
   109  	//init_static_checkpoints()           // init some hard coded checkpoints
   110  	checkpoints.LoadCheckPoints(logger) // load checkpoints from file if provided
   111  
   112  	if params["--simulator"] == true { // simulator always uses boltdb backend
   113  		chain.store = storage.Bolt_backend // setup backend
   114  		chain.store.Init(params)           // init backend
   115  
   116  	} else {
   117  		if (runtime.GOARCH == "amd64" && !globals.Arguments["--badgerdb"].(bool)) || globals.Arguments["--boltdb"].(bool) {
   118  			chain.store = storage.Bolt_backend // setup backend
   119  			chain.store.Init(params)           // init backend
   120  		} else {
   121  			chain.store = storage.Badger_backend // setup backend
   122  			chain.store.Init(params)             // init backend
   123  		}
   124  	}
   125  
   126  	/*
   127  
   128  		   logger.Infof("%+v", *storage.MySQL_backend)
   129  		   chain.store = storage.MySQL_backend
   130  		   chain.store.Init(params)
   131  		   logger.Infof("%+v", *storage.MySQL_backend)
   132  
   133  		//       xyz := storage.MySQL_backend
   134  		//xyz.Init(params)
   135  
   136  		if err != nil {
   137  			logger.Infof("Error Initialising blockchain mysql backend , err %s", err)
   138  			return nil, err
   139  		}
   140  
   141  	*/
   142  	//chain.Tips = map[crypto.Hash]crypto.Hash{} // initialize Tips map
   143  	chain.lrucache_workscore = lru.New(8191)  // temporary cache for work caclculation
   144  	chain.lrucache_fullorder = lru.New(20480) // temporary cache for fullorder caclculation
   145  
   146  	if globals.Arguments["--disable-checkpoints"] != nil {
   147  		chain.checkpints_disabled = globals.Arguments["--disable-checkpoints"].(bool)
   148  	}
   149  
   150  	if params["--simulator"] == true {
   151  		chain.simulator = true // enable simulator mode, this will set hard coded difficulty to 1
   152  	}
   153  
   154  	chain.Exit_Event = make(chan bool) // init exit channel
   155  
   156  	// init mempool before chain starts
   157  	chain.Mempool, err = mempool.Init_Mempool(params)
   158  
   159  	// we need to check mainnet/testnet check whether the genesis block matches the testnet/mainet
   160  	// mean whether the user is trying to use mainnet db with testnet option or vice-versa
   161  	if chain.Block_Exists(nil, config.Mainnet.Genesis_Block_Hash) || chain.Block_Exists(nil, config.Testnet.Genesis_Block_Hash) {
   162  
   163  		if globals.IsMainnet() && !chain.Block_Exists(nil, config.Mainnet.Genesis_Block_Hash) {
   164  			logger.Fatalf("Tryng to use a testnet database with mainnet, please add --testnet option")
   165  		}
   166  
   167  		if !globals.IsMainnet() && !chain.Block_Exists(nil, config.Testnet.Genesis_Block_Hash) {
   168  			logger.Fatalf("Tryng to use a mainnet database with testnet, please remove --testnet option")
   169  		}
   170  
   171  		// check if user is trying to load previous testnet DB with , reject
   172  		if !globals.IsMainnet() && chain.Block_Exists(nil,crypto.HashHexToHash("4dfc6daa5e104250125e0a14b74eca04730fd5bec4e826fa54f791245aa924f2")) { 
   173  			logger.Warnf("Please delete existing testnet DB as testnet has boostrapped")
   174  			return nil, fmt.Errorf("Please delete existing testnet DB.")
   175  		}
   176  
   177  	}
   178  
   179  	// genesis block not in chain, add it to chain, together with its miner tx
   180  	// make sure genesis is in the store
   181  	bl := Generate_Genesis_Block()
   182  	//if !chain.Block_Exists(globals.Config.Genesis_Block_Hash) {
   183  	if !chain.Block_Exists(nil, bl.GetHash()) {
   184  		//chain.Store_TOP_ID(globals.Config.Genesis_Block_Hash) // store top id , exception of genesis block
   185  		logger.Debugf("Genesis block not in store, add it now")
   186  		var complete_block block.Complete_Block
   187  		//bl := Generate_Genesis_Block()
   188  		complete_block.Bl = &bl
   189  
   190  		/*if !chain.Add_Complete_Block(&complete_block) {
   191  			logger.Fatalf("Failed to add genesis block, we can no longer continue")
   192  		}*/
   193  
   194  		logger.Infof("Added block successfully")
   195  
   196  		//chain.store_Block_Settled(bl.GetHash(),true) // genesis block is always settled
   197  
   198  		dbtx, err := chain.store.BeginTX(true)
   199  		if err != nil {
   200  			logger.Warnf("Could NOT add block to chain. Error opening writable TX, err %s", err)
   201  			// return
   202  		}
   203  		chain.Store_BL(dbtx, &bl)
   204  
   205  		bl_current_hash := bl.GetHash()
   206  		// store total  reward
   207  		dbtx.StoreUint64(BLOCKCHAIN_UNIVERSE, GALAXY_BLOCK, bl_current_hash[:], PLANET_MINERTX_REWARD, bl.Miner_TX.Vout[0].Amount)
   208  
   209  		// store base reward
   210  		dbtx.StoreUint64(BLOCKCHAIN_UNIVERSE, GALAXY_BLOCK, bl_current_hash[:], PLANET_BASEREWARD, bl.Miner_TX.Vout[0].Amount)
   211  
   212  		// store total generated coins
   213  		// this is hardcoded at initial chain import, keeping original emission schedule
   214  		if globals.IsMainnet(){
   215  				dbtx.StoreUint64(BLOCKCHAIN_UNIVERSE, GALAXY_BLOCK, bl_current_hash[:], PLANET_ALREADY_GENERATED_COINS, config.MAINNET_HARDFORK_1_TOTAL_SUPPLY)		
   216  			}else{
   217  				dbtx.StoreUint64(BLOCKCHAIN_UNIVERSE, GALAXY_BLOCK, bl_current_hash[:], PLANET_ALREADY_GENERATED_COINS, config.TESTNET_HARDFORK_1_TOTAL_SUPPLY)		
   218  			}
   219  		
   220  
   221  		chain.Store_Block_Topological_order(dbtx, bl.GetHash(), 0) // genesis block is the lowest
   222  		chain.Store_TOPO_HEIGHT(dbtx, 0)                           //
   223  		chain.Store_TOP_HEIGHT(dbtx, 0)
   224  
   225  		chain.store_TIPS(dbtx, []crypto.Hash{bl.GetHash()})
   226  
   227  		dbtx.Commit()
   228  
   229  	}
   230  
   231  	//fmt.Printf("Genesis Block should be present at height 0\n")
   232  	/*blocks := chain.Get_Blocks_At_Height(0)
   233  	  fmt.Printf("blocks at height 0 %+v\n", blocks)
   234  
   235  	  fmt.Printf("Past of  genesis %+v\n", chain.Get_Block_Past(bl.GetHash()))
   236  	  fmt.Printf("Future of  genesis %+v\n", chain.Get_Block_Future(bl.GetHash()))
   237  
   238  	  fmt.Printf("Future of  zero block  %+v\n", chain.Get_Block_Future(ZERO_HASH))
   239  	*/
   240  
   241  	// load the chain from the disk
   242  	chain.Initialise_Chain_From_DB()
   243  
   244  	//   logger.Fatalf("Testing complete quitting")
   245  
   246  	// hard forks must be initialized after chain is up
   247  	init_hard_forks(params)
   248  
   249  	go clean_up_valid_cache() // clean up valid cache
   250  
   251  	/*  txlist := chain.Mempool.Mempool_List_TX()
   252  	    for i := range txlist {
   253  	       // if fmt.Sprintf("%s", txlist[i]) == "0fe0e7270ba911956e91d9ea099e4d12aa1bce2473d4064e239731bc37acfd86"{
   254  	        logger.Infof("Verifying tx %s %+v", txlist[i], chain.Verify_Transaction_NonCoinbase(chain.Mempool.Mempool_Get_TX(txlist[i])))
   255  
   256  	        //}
   257  	        //p2p.Broadcast_Tx(chain.Mempool.Mempool_Get_TX(txlist[i]))
   258  	    }
   259  	*/
   260  
   261  	if chain.checkpints_disabled {
   262  		logger.Infof("Internal Checkpoints are disabled")
   263  	} else {
   264  		logger.Debugf("Internal Checkpoints are enabled")
   265  	}
   266  
   267  	_ = err
   268  
   269  	atomic.AddUint32(&globals.Subsystem_Active, 1) // increment subsystem
   270  
   271  	// register the metrics with the metrics registry
   272  	metrics.Registry.MustRegister(blockchain_tx_counter)
   273  	metrics.Registry.MustRegister(mempool_tx_counter)
   274  	metrics.Registry.MustRegister(mempool_tx_count)
   275  	metrics.Registry.MustRegister(block_size)
   276  	metrics.Registry.MustRegister(transaction_size)
   277  	metrics.Registry.MustRegister(block_tx_count)
   278  	metrics.Registry.MustRegister(block_processing_time)
   279  
   280  	return &chain, nil
   281  }
   282  
   283  // this function is called to read blockchain state from DB
   284  // It is callable at any point in time
   285  func (chain *Blockchain) Initialise_Chain_From_DB() {
   286  	chain.Lock()
   287  	defer chain.Unlock()
   288  
   289  	// locate top block
   290  	/*
   291  		chain.Top_ID = chain.Load_TOP_ID()
   292  		//chain.Height = (chain.Load_Height_for_BL_ID(chain.Top_ID) + 1)
   293  		chain.Difficulty = chain.Get_Difficulty()
   294  		chain.Top_Block_Median_Size = chain.Get_Median_BlockSize_At_Block(chain.Top_ID)
   295  		chain.Top_Block_Base_Reward = chain.Load_Block_Reward(chain.Top_ID)
   296  		// set it so it is not required to be calculated frequently
   297  		chain.Median_Block_Size = chain.Get_Median_BlockSize_At_Block(chain.Get_Top_ID())
   298  		if chain.Median_Block_Size < config.CRYPTONOTE_BLOCK_GRANTED_FULL_REWARD_ZONE {
   299  			chain.Median_Block_Size = config.CRYPTONOTE_BLOCK_GRANTED_FULL_REWARD_ZONE
   300  		}
   301  	*/
   302  	// find the tips from the chain , first by reaching top height
   303  	// then downgrading to top-10 height
   304  	// then reworking the chain to get the tip
   305  	best_height := chain.Load_TOP_HEIGHT(nil)
   306  	chain.Height = best_height
   307  
   308  	// reload tips from disk
   309  	tips := chain.load_TIPS(nil)
   310  
   311  	// get dag unsettled, it's only possible when we have the tips
   312  	// chain.dag_unsettled = chain.Get_DAG_Unsettled() // directly off the disk
   313  
   314  	logger.Infof("Chain Tips  %+v Height %d", tips, chain.Height)
   315  
   316  }
   317  
// Shutdown stops the blockchain subsystem; before shutdown, make sure p2p is
// confirmed stopped. Ordering matters: signal exit, stop the mempool, then
// close the store. The lock is deliberately never released — the chain must
// not be used after this point.
func (chain *Blockchain) Shutdown() {

	chain.Lock()            // take the lock as chain is no longer in unsafe mode
	close(chain.Exit_Event) // send signal to everyone we are shutting down

	chain.Mempool.Shutdown() // shutdown mempool first

	logger.Infof("Stopping Blockchain")
	chain.store.Shutdown()
	atomic.AddUint32(&globals.Subsystem_Active, ^uint32(0)) // this decrement 1 fom subsystem
}
   330  
   331  // get top unstable height
   332  // this is obtained by  getting the highest topo block and getting its height
   333  func (chain *Blockchain) Get_Height() int64 {
   334  
   335  	topo_height := chain.Load_TOPO_HEIGHT(nil)
   336  
   337  	blid, err := chain.Load_Block_Topological_order_at_index(nil, topo_height)
   338  	if err != nil {
   339  		logger.Warnf("Cannot get block  at topoheight %d err: %s", topo_height, err)
   340  		return 0
   341  	}
   342  
   343  	height := chain.Load_Height_for_BL_ID(nil, blid)
   344  
   345  	//return atomic.LoadUint64(&chain.Height)
   346  	return height
   347  }
   348  
   349  // get height where chain is now stable
   350  func (chain *Blockchain) Get_Stable_Height() int64 {
   351  
   352  	dbtx, err := chain.store.BeginTX(false)
   353  	if err != nil {
   354  		logger.Warnf("Could NOT add block to chain. Error opening writable TX, err %s", err)
   355  		return -1
   356  	}
   357  
   358  	defer dbtx.Rollback()
   359  
   360  	tips := chain.Get_TIPS()
   361  	base, base_height := chain.find_common_base(dbtx, tips)
   362  	_ = base
   363  
   364  	return int64(base_height)
   365  }
   366  
// Get_TIPS returns the current chain tips straight from the store.
// we should be holding lock at this time, atleast read only
func (chain *Blockchain) Get_TIPS() (tips []crypto.Hash) {
	return chain.load_TIPS(nil)
}
   371  
   372  func (chain *Blockchain) Get_Top_ID() crypto.Hash {
   373  	topo_height := chain.Load_TOPO_HEIGHT(nil)
   374  
   375  	blid, err := chain.Load_Block_Topological_order_at_index(nil, topo_height)
   376  	if err != nil {
   377  		logger.Warnf("Cannot get block  at topoheight %d err: %s", topo_height, err)
   378  		return blid
   379  	}
   380  
   381  	return blid
   382  }
   383  
// Get_Difficulty returns the current difficulty evaluated at the current tips.
func (chain *Blockchain) Get_Difficulty() uint64 {
	return chain.Get_Difficulty_At_Tips(nil, chain.Get_TIPS()).Uint64()
}
   387  
// Get_Cumulative_Difficulty is currently a stub and always returns 0.
// The previous implementation is retained below for reference.
func (chain *Blockchain) Get_Cumulative_Difficulty() uint64 {

	/*
		topo_height := chain.Load_TOPO_HEIGHT(nil)

		blid, err := chain.Load_Block_Topological_order_at_index(nil, topo_height)
		if err != nil {
			logger.Warnf("Cannot get block  at topoheight %d err: %s",topo_height,err)
			return 0
		}

		past := chain.Get_Block_Past(nil,blid)
		return  chain.Get_Difficulty_At_Tips(nil, past,uint64(uint64(time.Now().UTC().Unix())+config.BLOCK_TIME)).Uint64()

	*/

	return 0 //chain.Load_Block_Cumulative_Difficulty(chain.Top_ID)
}
   406  
// Get_Median_Block_Size returns the cached median block size.
func (chain *Blockchain) Get_Median_Block_Size() uint64 { // get current cached median size
	return chain.Median_Block_Size
}
   410  
// Get_Network_HashRate estimates the network hash rate as current difficulty
// divided by the current block time.
// NOTE(review): assumes Get_Current_BlockTime() is never 0 — confirm,
// otherwise this divides by zero.
func (chain *Blockchain) Get_Network_HashRate() uint64 {
	return chain.Get_Difficulty() / chain.Get_Current_BlockTime()
}
   414  
   415  // confirm whether the block exist in the data
   416  // this only confirms whether the block has been downloaded
   417  // a separate check is required, whether the block is valid ( satifies PoW and other conditions)
   418  // we will not add a block to store, until it satisfies PoW
   419  func (chain *Blockchain) Block_Exists(dbtx storage.DBTX, h crypto.Hash) bool {
   420  	_, err := chain.Load_BL_FROM_ID(dbtx, h)
   421  	if err == nil {
   422  		return true
   423  	}
   424  	return false
   425  }
   426  
// various counters/gauges which track a number of metrics
// such as number of txs, number of inputs, number of outputs
// mempool total addition, current mempool size
// block processing time etc.

// total number of txs mined into blocks
var blockchain_tx_counter = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "blockchain_tx_counter",
	Help: "Number of tx mined",
})

// total number of txs ever accepted into the mempool
var mempool_tx_counter = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "mempool_tx_counter",
	Help: "Total number of tx added in mempool",
})

// current number of txs sitting in the mempool
var mempool_tx_count = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "mempool_tx_count",
	Help: "Number of tx in mempool at this point",
})

// track complete block size upto ~1 MB
var block_size = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "block_size_byte",
	Help:    "Block size in byte (complete)",
	Buckets: prometheus.LinearBuckets(0, 102400, 10), // start 0 byte, step 100 KiB, 10 buckets (covers upto ~1 MB)
})

// track transaction size upto ~160 KB
var transaction_size = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "tx_size_byte",
	Help:    "TX size in byte",
	Buckets: prometheus.LinearBuckets(0, 10240, 16), // start 0 byte, step 10 KiB, 16 buckets (covers upto 160 KiB)
})

// number of txs per block, upto 500
var block_tx_count = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "block_tx_count",
	Help:    "Number of TX in the block",
	Buckets: prometheus.LinearBuckets(0, 20, 25), // start 0 tx, step 20 txs, 25 buckets (covers upto 500 txs)
})

// block processing time upto 2 seconds
var block_processing_time = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "block_processing_time_ms",
	Help:    "Block processing time milliseconds",
	Buckets: prometheus.LinearBuckets(0, 100, 20), // start 0 ms, step 100 ms, 20 buckets (covers upto 2 s)
})
   474  
// Add_TX_To_Pool is the only entrypoint for new txs in the chain.
// It adds a transaction to the MEMPOOL, verifying everything possible.
// This only changes the mempool, no DB changes.
// Returns true when the tx is (or already was) accepted into the pool;
// false when it is rejected. A DB failure also returns true so peers
// keep rebroadcasting the tx.
func (chain *Blockchain) Add_TX_To_Pool(tx *transaction.Transaction) (result bool) {

	// chain lock is no longer required as we only do readonly processing
	//	chain.Lock()
	//	defer chain.Unlock()

	dbtx, err := chain.store.BeginTX(false)
	if err != nil {
		logger.Warnf("Could NOT create DB transaction  err %s", err)
		return true // just make it rebroadcast
	}

	// track counter for the amount of mempool tx
	defer mempool_tx_count.Set(float64(len(chain.Mempool.Mempool_List_TX())))

	defer dbtx.Rollback()

	txhash := tx.GetHash()

	// Coin base TX can not come through this path
	if tx.IsCoinbase() {
		logger.WithFields(log.Fields{"txid": txhash}).Warnf("TX rejected  coinbase tx cannot appear in mempool")
		return false
	}

	// quick check without calculating everything whether tx is in pool, if yes we do nothing
	if chain.Mempool.Mempool_TX_Exist(txhash) {
		rlog.Tracef(2,"TX %s rejected Already in MEMPOOL", txhash)
		return true
	}

	// fee rules depend on the hard-fork version active at the current height
	hf_version := chain.Get_Current_Version_at_Height(chain.Get_Height())

	// TODO if someone is relaying existing tx again and again, we need to quickly figure it and avoid expensive verification
	// a simple technique seems to be to do key image verification for double spend, if it's reject
	// this test is placed to avoid ring signature verification cost for faulty tx as early as possible
	if !chain.Verify_Transaction_NonCoinbase_DoubleSpend_Check(dbtx, tx) { // BUG BUG BUG we must use dbtx to confirm
		rlog.Tracef(2,"TX %s rejected due to double spending", txhash)
		return false
	}

	// if TX is too big, then it cannot be mined due to fixed block size, reject such TXs here
	// currently, limits are  as per consensus
	if uint64(len(tx.Serialize())) > config.CRYPTONOTE_MAX_TX_SIZE {
		logger.WithFields(log.Fields{"txid": txhash}).Warnf("TX rejected  Size %d byte Max possible %d", len(tx.Serialize()), config.CRYPTONOTE_MAX_TX_SIZE)
		return false
	}

	// check whether enough fees is provided in the transaction
	calculated_fee := chain.Calculate_TX_fee(hf_version, uint64(len(tx.Serialize())))
	provided_fee := tx.RctSignature.Get_TX_Fee() // get fee from tx

	if calculated_fee > provided_fee { // 2 % margin see blockchain.cpp L 2913
		logger.WithFields(log.Fields{"txid": txhash}).Warnf("TX rejected due to low fees  provided fee %d calculated fee %d", provided_fee, calculated_fee)

		rlog.Warnf("TX  %s rejected due to low fees  provided fee %d calculated fee %d", txhash, provided_fee, calculated_fee)
		return false
	}

	// full ring-signature verification plus a second double-spend check
	// before finally handing the tx to the mempool
	if chain.Verify_Transaction_NonCoinbase(dbtx, hf_version, tx) && chain.Verify_Transaction_NonCoinbase_DoubleSpend_Check(dbtx, tx) {
		if chain.Mempool.Mempool_Add_TX(tx, 0) { // new tx come with 0 marker
			rlog.Tracef(2,"Successfully added tx %s to pool", txhash)

			mempool_tx_counter.Inc()
			return true
		} else {
			rlog.Tracef(2,"TX %s rejected by pool", txhash)
			return false
		}
	}

	rlog.Warnf("Incoming TX %s could not be verified", txhash)
	return false

}
   554  
// BlockScore is used to rank/sort blocks on a number of factors.
type BlockScore struct {
	BLID crypto.Hash // id of the block being scored
	// Weight uint64
	Height                int64    // block height
	Cumulative_Difficulty *big.Int // used to score blocks on cumulative difficulty
}
   562  
   563  // Heighest node weight is ordered first,  the condition is reverted see eg. at https://golang.org/pkg/sort/#Slice
   564  //  if weights are equal, nodes are sorted by their block ids which will never collide , hopefullly
   565  // block ids are sorted by lowest byte first diff
   566  func sort_descending_by_cumulative_difficulty(tips_scores []BlockScore) {
   567  
   568  	sort.Slice(tips_scores, func(i, j int) bool {
   569  		if tips_scores[i].Cumulative_Difficulty.Cmp(tips_scores[j].Cumulative_Difficulty) != 0 { // if diffculty mismatch use them
   570  
   571  			if tips_scores[i].Cumulative_Difficulty.Cmp(tips_scores[j].Cumulative_Difficulty) > 0 { // if i diff >  j diff
   572  				return true
   573  			} else {
   574  				return false
   575  			}
   576  
   577  		} else {
   578  			return bytes.Compare(tips_scores[i].BLID[:], tips_scores[j].BLID[:]) == -1
   579  		}
   580  	})
   581  }
   582  
// sort_ascending_by_height orders the scores so the lowest height comes first.
func sort_ascending_by_height(tips_scores []BlockScore) {

	// base is the lowest height
	sort.Slice(tips_scores, func(i, j int) bool { return tips_scores[i].Height < tips_scores[j].Height })

}
   589  
   590  // this will sort the tips based on cumulative difficulty and/or block ids
   591  // the tips will sorted in descending order
   592  func (chain *Blockchain) SortTips(dbtx storage.DBTX, tips []crypto.Hash) (sorted []crypto.Hash) {
   593  	if len(tips) == 0 {
   594  		panic("tips cannot be 0")
   595  	}
   596  	if len(tips) == 1 {
   597  		sorted = []crypto.Hash{tips[0]}
   598  		return
   599  	}
   600  
   601  	tips_scores := make([]BlockScore, len(tips), len(tips))
   602  	for i := range tips {
   603  		tips_scores[i].BLID = tips[i]
   604  		tips_scores[i].Cumulative_Difficulty = chain.Load_Block_Cumulative_Difficulty(dbtx, tips[i])
   605  	}
   606  
   607  	sort_descending_by_cumulative_difficulty(tips_scores)
   608  
   609  	for i := range tips_scores {
   610  		sorted = append(sorted, tips_scores[i].BLID)
   611  	}
   612  	return
   613  }
   614  

// side blocks are blocks which lost the race to become part
// of main chain, but their transactions are honoured,
// they are given 67 % reward
// a block is a side block if it satisfies the following condition:
// its height is less than or equal to the height of any of the past
// STABLE_LIMIT topographically ordered blocks
// this is part of consensus rule
// block_topoheight is the topoheight of this block itself
// NOTE(review): the panic below conflicts with this file's "NO Panics" policy;
// it is caught by the recover() in the block-addition path — confirm before changing.
func (chain *Blockchain) isblock_SideBlock(dbtx storage.DBTX, blid crypto.Hash, block_topoheight int64) (result bool) {

	// the first few blocks can never be side blocks
	if block_topoheight <= 2 {
		return false
	}
	// lower reward for byzantine behaviour
	// for as many block as added
	block_height := chain.Load_Height_for_BL_ID(dbtx, blid)

	// walk back upto STABLE_LIMIT topo-ordered predecessors
	counter := int64(0)
	for i := block_topoheight - 1; i >= 0 && counter < config.STABLE_LIMIT; i-- {
		counter++

		previous_blid, err := chain.Load_Block_Topological_order_at_index(dbtx, i)
		if err != nil {
			panic("Could not load block from previous order")
		}
		// height of previous topo ordered block
		previous_height := chain.Load_Height_for_BL_ID(dbtx, previous_blid)

		if block_height <= previous_height { // lost race (or byzantine behaviour)
			return true // give only 67 % reward
		}

	}

	return false
}
   651  
   652  // this is the only entrypoint for new / old blocks even for genesis block
   653  // this will add the entire block atomically to the chain
   654  // this is the only function which can add blocks to the chain
   655  // this is exported, so ii can be fed new blocks by p2p layer
   656  // genesis block is no different
   657  // TODO: we should stop mining while adding the new block
   658  func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err error, result bool) {
   659  
   660  	var block_hash crypto.Hash
   661  	chain.Lock()
   662  	defer chain.Unlock()
   663  	result = false
   664  
   665  	dbtx, err := chain.store.BeginTX(true)
   666  	if err != nil {
   667  		logger.Warnf("Could NOT add block to chain. Error opening writable TX, err %s", err)
   668  		return errormsg.ErrInvalidStorageTX, false
   669  	}
   670  
   671  	chain.MINING_BLOCK = true
   672  
   673  	processing_start := time.Now()
   674  
   675  	//old_top := chain.Load_TOP_ID() // store top as it may change
   676  	defer func() {
   677  
   678  		// safety so if anything wrong happens, verification fails
   679  		if r := recover(); r != nil {
   680  			logger.Warnf("Recovered while adding new block, Stack trace below block_hash %s", block_hash)
   681  			logger.Warnf("Stack trace  \n%s", debug.Stack())
   682  			result = false
   683  			err = errormsg.ErrPanic
   684  		}
   685  
   686  		chain.MINING_BLOCK = false
   687  
   688  		if result == true { // block was successfully added, commit it atomically
   689  			dbtx.Commit()
   690  
   691  			rlog.Infof("Block successfully acceppted by chain %s", block_hash)
   692  
   693  			// gracefully try to instrument
   694  			func() {
   695  				defer func() {
   696  					if r := recover(); r != nil {
   697  						rlog.Warnf("Recovered while instrumenting")
   698  						rlog.Warnf("Stack trace \n%s", debug.Stack())
   699  
   700  					}
   701  				}()
   702  				blockchain_tx_counter.Add(float64(len(cbl.Bl.Tx_hashes)))
   703  				block_tx_count.Observe(float64(len(cbl.Bl.Tx_hashes)))
   704  				block_processing_time.Observe(float64(time.Now().Sub(processing_start).Round(time.Millisecond) / 1000000))
   705  
   706  				// tracks counters for tx_size
   707  
   708  				{
   709  					complete_block_size := 0
   710  					for i := 0; i < len(cbl.Txs); i++ {
   711  						tx_size := len(cbl.Txs[i].Serialize())
   712  						complete_block_size += tx_size
   713  						transaction_size.Observe(float64(tx_size))
   714  					}
   715  					block_size.Observe(float64(complete_block_size))
   716  				}
   717  			}()
   718  
   719  			//dbtx.Sync() // sync the DB to disk after every execution of this function
   720  
   721  			//if old_top != chain.Load_TOP_ID() { // if top has changed, discard mining templates and start afresh
   722  			// TODO discard mining templates or something else, if top chnages requires some action
   723  
   724  			//}
   725  		} else {
   726  			dbtx.Rollback() // if block could not be added, rollback all changes to previous block
   727  			rlog.Infof("Block rejected by chain %s err %s", block_hash, err)
   728  		}
   729  	}()
   730  
   731  	bl := cbl.Bl // small pointer to block
   732  
   733  	// first of all lets do some quick checks
   734  	// before doing extensive checks
   735  	result = false
   736  
   737  	block_hash = bl.GetHash()
   738  	block_logger := logger.WithFields(log.Fields{"blid": block_hash})
   739  
   740  	// check if block already exist skip it
   741  	if chain.Block_Exists(dbtx, block_hash) {
   742  		block_logger.Debugf("block already in chain skipping it ")
   743  		return errormsg.ErrAlreadyExists, false
   744  	}
   745  
   746  	// only 3 tips allowed in block
   747  	if len(bl.Tips) >= 4 {
   748  		rlog.Warnf("More than 3 tips present in block %s rejecting", block_hash)
   749  		return errormsg.ErrPastMissing, false
   750  	}
   751  
   752  	// check whether the tips exist in our chain, if not reject
   753  	if chain.Get_Height() > 0 {
   754  		for i := range bl.Tips {
   755  			if !chain.Block_Exists(dbtx, bl.Tips[i]) {
   756  				rlog.Warnf("Tip  %s  is NOT present in chain current block %s, skipping it till we get a parent", bl.Tips[i], block_hash)
   757  				return errormsg.ErrPastMissing, false
   758  			}
   759  		}
   760  	}
   761  
   762  	
   763  
   764  	block_height := chain.Calculate_Height_At_Tips(dbtx, bl.Tips)
   765  
   766  	if block_height == 0 && bl.GetHash() != globals.Config.Genesis_Block_Hash {
   767  		block_logger.Warnf("There can can be only one genesis block, reject it, len of tips(%d)", len(bl.Tips))
   768  		return errormsg.ErrInvalidBlock, false
   769  	}
   770  	if block_height < chain.Get_Stable_Height() {
   771  		rlog.Warnf("Block %s rejected since it is stale stable height %d  block height %d", bl.GetHash(), chain.Get_Stable_Height(), block_height)
   772  		return errormsg.ErrInvalidBlock, false
   773  	}
   774  
   775  	// use checksum to quick jump
   776  	if chain.checkpints_disabled == false && checkpoints.IsCheckSumKnown(chain.BlockCheckSum(cbl)) {
   777  		rlog.Debugf("Skipping Deep Checks for block %s ", block_hash)
   778  		goto skip_checks
   779  	} else {
   780  		rlog.Debugf("Deep Checks for block %s ", block_hash)
   781  	}
   782  
   783  	// version 1 blocks ( old chain) should NOT be mined by used
   784  	// they should use hard coded checkpoints
   785  	if chain.checkpints_disabled == false && chain.Get_Current_Version_at_Height(block_height) == 1 {
   786  		logger.Warnf("v1 blocks cannot be mined (these are imported blocks), rejecting")
   787  		return errormsg.ErrInvalidBlock, false
   788  	}
   789  
   790  	/*
   791  
   792  		// check  a small list 100 hashes whether they have been reached
   793  		if IsCheckPointKnown_Static(block_hash, chain.Load_Height_for_BL_ID(bl.Prev_Hash)+1) {
   794  			logger.Infof("Static Checkpoint reached at height %d", chain.Load_Height_for_BL_ID(bl.Prev_Hash)+1)
   795  		}
   796  
   797  		rlog.Tracef(1, "Checking Known checkpoint %s at height %d", block_hash, chain.Load_Height_for_BL_ID(bl.Prev_Hash)+1)
   798  
   799  		//if we have checkpoints embedded, they must match
   800  		// until user disables this check
   801  		// skip checkpoint check for genesis block
   802  		if block_hash != globals.Config.Genesis_Block_Hash {
   803  			if chain.checkpints_disabled == false && checkpoints.Length() > chain.Load_Height_for_BL_ID(bl.Prev_Hash)+1 && !checkpoints.IsCheckPointKnown(block_hash, chain.Load_Height_for_BL_ID(bl.Prev_Hash)+1) {
   804  				block_logger.Warnf("Block hash mismatch with checkpoint height %d", chain.Load_Height_for_BL_ID(bl.Prev_Hash)+1)
   805  				return
   806  			}
   807  
   808  
   809  
   810  		}
   811  	*/
   812  
   813  	// make sure time is NOT too much into future, we have 2 seconds of margin here
   814  	// some OS have trouble syncing with more than 1 sec granularity
   815  	// if clock diff is more than   2 secs, reject the block
   816  	if bl.Timestamp > (uint64(time.Now().UTC().Unix()) + config.CRYPTONOTE_FUTURE_TIME_LIMIT) {
   817  		block_logger.Warnf("Rejecting Block, timestamp is too much into future, make sure that system clock is correct")
   818  		return errormsg.ErrFutureTimestamp, false
   819  	}
   820  
   821  	// verify that the clock is not being run in reverse
   822  	// the block timestamp cannot be less than any of the parents
   823  	for i := range bl.Tips {
   824  		if uint64(chain.Load_Block_Timestamp(dbtx, bl.Tips[i])) > bl.Timestamp {
   825  			block_logger.Warnf("Block timestamp is  less than its parent, rejecting block")
   826  			return errormsg.ErrInvalidTimestamp, false
   827  		}
   828  	}
   829  
   830  	//logger.Infof("current version %d  height %d", chain.Get_Current_Version_at_Height( 2500), chain.Calculate_Height_At_Tips(dbtx, bl.Tips))
   831  	// check whether the major version ( hard fork) is valid
   832  	if !chain.Check_Block_Version(dbtx, bl) {
   833  		block_logger.Warnf("Rejecting !! Block has invalid fork version actual %d expected %d", bl.Major_Version, chain.Get_Current_Version_at_Height(chain.Calculate_Height_At_Tips(dbtx, bl.Tips)))
   834  		return errormsg.ErrInvalidBlock, false
   835  	}
   836  
   837  	// verify whether the tips are unreachable from one another
   838  	if !chain.VerifyNonReachability(dbtx, bl) {
   839  		block_logger.Warnf("Rejecting !! Block has invalid reachability")
   840  		return errormsg.ErrInvalidBlock, false
   841  
   842  	}
   843  
   844  	// if the block is referencing any past tip too distant into main chain discard now
   845  	// TODO FIXME this need to computed
   846  	for i := range bl.Tips {
   847  		rusty_tip_base_distance := chain.calculate_mainchain_distance(dbtx, bl.Tips[i])
   848  
   849  		// tips of deviation >= 8 will be rejected
   850  		if (int64(chain.Get_Height()) - rusty_tip_base_distance) >= config.STABLE_LIMIT {
   851  			block_logger.Warnf("Rusty TIP  mined by ROGUE miner discarding block %s  best height %d deviation %d rusty_tip %d", bl.Tips[i], chain.Get_Height(), (int64(chain.Get_Height()) - rusty_tip_base_distance), rusty_tip_base_distance)
   852  			return errormsg.ErrInvalidBlock, false
   853  		}
   854  	}
   855  
   856  	// verify difficulty of tips provided
   857  	if len(bl.Tips) > 1 {
   858  		best_tip := chain.find_best_tip_cumulative_difficulty(dbtx, bl.Tips)
   859  		for i := range bl.Tips {
   860  			if best_tip != bl.Tips[i] {
   861  				if !chain.validate_tips(dbtx, best_tip, bl.Tips[i]) { // reference is first
   862  					block_logger.Warnf("Rusty tip mined by ROGUE miner, discarding block")
   863  					return errormsg.ErrInvalidBlock, false
   864  				}
   865  			}
   866  		}
   867  	}
   868  
   869  	// check whether the block crosses the size limit
   870  	// block size is calculate by adding all the txs
   871  	// block header/miner tx is excluded, only tx size if calculated
   872  	{
   873  		block_size := 0
   874  		for i := 0; i < len(cbl.Txs); i++ {
   875  			block_size += len(cbl.Txs[i].Serialize())
   876  			if uint64(block_size) >= config.CRYPTONOTE_MAX_BLOCK_SIZE {
   877  				block_logger.Warnf("Block is bigger than max permitted, Rejecting it Actual %d MAX %d ", block_size, config.CRYPTONOTE_MAX_BLOCK_SIZE)
   878  				return errormsg.ErrInvalidSize, false
   879  			}
   880  		}
   881  	}
   882  
   883  	//logger.Infof("pow hash %s height %d", bl.GetPoWHash(), block_height)
   884  
   885  	// Verify Blocks Proof-Of-Work
   886  	// check if the PoW is satisfied
   887  	if !chain.VerifyPoW(dbtx, bl) { // if invalid Pow, reject the bloc
   888  		block_logger.Warnf("Block has invalid PoW, rejecting it %x", bl.Serialize())
   889  		return errormsg.ErrInvalidPoW, false
   890  	}
   891  
   892  	// verify coinbase tx
   893  	if !chain.Verify_Transaction_Coinbase(dbtx, cbl, &bl.Miner_TX) {
   894  		block_logger.Warnf("Miner tx failed verification  rejecting ")
   895  		return errormsg.ErrInvalidBlock, false
   896  	}
   897  
   898  	// from version 2, minertx should contain the 0 as base reward as it is calculated by client protocol
   899  	if chain.Get_Current_Version_at_Height(block_height) >= 2 && bl.Miner_TX.Vout[0].Amount != 0 {
   900  		block_logger.Warnf("Miner tx failed should have block reward as zero,rejecting block")
   901  		return errormsg.ErrInvalidBlock, false
   902  	}
   903  
   904  	{
   905  		// now we need to verify each and every tx in detail
   906  		// we need to verify each and every tx contained in the block, sanity check everything
   907  		// first of all check, whether all the tx contained in the block, match their hashes
   908  		{
   909  			if len(bl.Tx_hashes) != len(cbl.Txs) {
   910  				block_logger.Warnf("Block says it has %d txs , however complete block contained %d txs", len(bl.Tx_hashes), len(cbl.Txs))
   911  				return errormsg.ErrInvalidBlock, false
   912  			}
   913  
   914  			// first check whether the complete block contains any duplicate hashes
   915  			tx_checklist := map[crypto.Hash]bool{}
   916  			for i := 0; i < len(bl.Tx_hashes); i++ {
   917  				tx_checklist[bl.Tx_hashes[i]] = true
   918  			}
   919  
   920  			if len(tx_checklist) != len(bl.Tx_hashes) { // block has duplicate tx, reject
   921  				block_logger.Warnf("Block has %d  duplicate txs, reject it", len(bl.Tx_hashes)-len(tx_checklist))
   922  				return errormsg.ErrInvalidBlock, false
   923  
   924  			}
   925  			// now lets loop through complete block, matching each tx
   926  			// detecting any duplicates using txid hash
   927  			for i := 0; i < len(cbl.Txs); i++ {
   928  				tx_hash := cbl.Txs[i].GetHash()
   929  				if _, ok := tx_checklist[tx_hash]; !ok {
   930  					// tx is NOT found in map, RED alert reject the block
   931  					block_logger.Warnf("Block says it has tx %s, but complete block does not have it", tx_hash)
   932  					return errormsg.ErrInvalidBlock, false
   933  				}
   934  			}
   935  		}
   936  
   937  		// another check, whether the tx contains any duplicate key images within the block
   938  		// block wide duplicate input detector
   939  		// TODO FIXME replace with a simple map
   940  		{
   941  			key_image_map := map[crypto.Hash]bool{}
   942  			for i := 0; i < len(cbl.Txs); i++ {
   943  				for j := 0; j < len(cbl.Txs[i].Vin); j++ {
   944  					if _, ok := key_image_map[cbl.Txs[i].Vin[j].(transaction.Txin_to_key).K_image]; ok {
   945  						block_logger.Warnf("Double Spend attack within block %s", cbl.Txs[i].GetHash())
   946  						return errormsg.ErrTXDoubleSpend, false
   947  					}
   948  					key_image_map[cbl.Txs[i].Vin[j].(transaction.Txin_to_key).K_image] = true
   949  				}
   950  			}
   951  		}
   952  
   953  		// TODO FIXME
   954  		// we need to check whether the dishonest miner is trying to include junk transactions which have been already mined and confirmed
   955  		// for these purposes we track keyimages with height where they have been spent
   956  		// so if a block contains any key images from earlier the stable point, reject the block even if PoW was good
   957  
   958  		for i := 0; i < len(cbl.Txs); i++ { // loop through all the TXs
   959  			for j := 0; j < len(cbl.Txs[i].Vin); j++ {
   960  				keyimage_height, ok := chain.Read_KeyImage_Status(dbtx, cbl.Txs[i].Vin[j].(transaction.Txin_to_key).K_image)
   961  				if ok && block_height-keyimage_height > 13 { // why 13, because reachability checks for 15
   962  					block_logger.Warnf("Dead TX attack tx %s contains DEAD transactions, rejecting ", cbl.Txs[i].GetHash())
   963  					return errormsg.ErrTXDead, false
   964  				}
   965  			}
   966  		}
   967  
   968  		// we also need to reject if the the immediately reachable history, has spent the keyimage
   969  		// both the checks works on the basis of keyimages and not on the basis of txhash
   970  		reachable_key_images := chain.BuildReachabilityKeyImages(dbtx, bl)
   971  
   972  		//block_logger.Infof("len of reachable keyimages %d", len(reachable_key_images))
   973  		for i := 0; i < len(cbl.Txs); i++ { // loop through all the TXs
   974  			for j := 0; j < len(cbl.Txs[i].Vin); j++ {
   975  				if _, ok := reachable_key_images[cbl.Txs[i].Vin[j].(transaction.Txin_to_key).K_image]; ok {
   976  					block_logger.Warnf("Double spend attack tx %s is already mined, rejecting ", cbl.Txs[i].GetHash())
   977  					return errormsg.ErrTXDead, false
   978  				}
   979  			}
   980  		}
   981  
   982  		// verify all non coinbase tx, single threaded, we have a multithreaded version below
   983  
   984  		/*
   985  			hf_version := chain.Get_Current_Version_at_Height(chain.Calculate_Height_At_Tips(dbtx, bl.Tips))
   986  
   987  			for i := 0 ; i < len(cbl.Txs); i++ {
   988  				rlog.Debugf("addcomplete block tx %s hf_version %d  height %d", cbl.Txs[i].GetHash(), hf_version, chain.Calculate_Height_At_Tips(dbtx, bl.Tips)  )
   989  
   990  				if !chain.Verify_Transaction_NonCoinbase(dbtx,hf_version,cbl.Txs[i]){
   991  					logger.Warnf("Non Coinbase tx failed verification  rejecting " )
   992  				 	return errormsg.ErrInvalidTX, false
   993  				}
   994  			}
   995  		*/
   996  
   997  		// we need to anyways verify the TXS since RCT signatures are not covered by checksum
   998  		fail_count := int32(0)
   999  		wg := sync.WaitGroup{}
  1000  		wg.Add(len(cbl.Txs)) // add total number of tx as work
  1001  
  1002  		hf_version := chain.Get_Current_Version_at_Height(chain.Calculate_Height_At_Tips(dbtx, bl.Tips))
  1003  		for i := 0; i < len(cbl.Txs); i++ {
  1004  			go func(j int) {
  1005  
  1006  				// NOTE : do NOT skip verification of Ring Signatures, even if the TX is already stored
  1007  				//        as change of conditions might cause the signature to be invalid
  1008  				if !chain.Verify_Transaction_NonCoinbase(dbtx, hf_version, cbl.Txs[j]) { // transaction verification failed
  1009  					atomic.AddInt32(&fail_count, 1) // increase fail count by 1
  1010  					block_logger.Warnf("Block verification failed rejecting since TX  %s verification failed", cbl.Txs[j].GetHash())
  1011  				}
  1012  				wg.Done()
  1013  			}(i)
  1014  		}
  1015  
  1016  		wg.Wait()           // wait for verifications to finish
  1017  		if fail_count > 0 { // check the result
  1018  			block_logger.Warnf("Block verification failed  rejecting since TX verification failed ")
  1019  			return errormsg.ErrInvalidTX, false
  1020  		}
  1021  
  1022  	}
  1023  
  1024  	// we are here means everything looks good, proceed and save to chain
  1025  skip_checks:
  1026  
  1027  	// save all the txs
  1028  	// and then save the block
  1029  	{ // first lets save all the txs, together with their link to this block as height
  1030  		for i := 0; i < len(cbl.Txs); i++ {
  1031  			chain.Store_TX(dbtx, cbl.Txs[i])
  1032  		}
  1033  	}
  1034  
  1035  	chain.Store_BL(dbtx, bl)
  1036  
  1037  	// if the block is on a lower height tip, the block will not increase chain height
  1038  	height := chain.Load_Height_for_BL_ID(dbtx, block_hash)
  1039  	if height > chain.Get_Height() || height == 0 { // exception for genesis block
  1040  		atomic.StoreInt64(&chain.Height, height)
  1041  		chain.Store_TOP_HEIGHT(dbtx, height)
  1042  		rlog.Infof("Chain extended new height %d blid %s", chain.Height, block_hash)
  1043  
  1044  	} else {
  1045  		rlog.Infof("Chain extended but height is same %d blid %s", chain.Height, block_hash)
  1046  
  1047  	}
  1048  
  1049  	// calculate new set of tips
  1050  	// this is done by removing all known tips which are in the past
  1051  	// and add this block as tip
  1052  
  1053  	past := chain.Get_Block_Past(dbtx, bl.GetHash())
  1054  
  1055  	old_tips := chain.load_TIPS(dbtx)
  1056  	tips_map := map[crypto.Hash]bool{bl.GetHash(): true} // add this new block as tip
  1057  	for i := range old_tips {
  1058  		tips_map[old_tips[i]] = true
  1059  	}
  1060  	for i := range past {
  1061  		delete(tips_map, past[i])
  1062  	}
  1063  
  1064  	tips := []crypto.Hash{}
  1065  	for k, _ := range tips_map {
  1066  		tips = append(tips, k)
  1067  	}
  1068  	chain.store_TIPS(dbtx, tips)
  1069  
  1070  	// find the biggest tip  in terms of work
  1071  	{
  1072  		tips := chain.load_TIPS(dbtx)
  1073  		base, base_height := chain.find_common_base(dbtx, tips)
  1074  		best := chain.find_best_tip(dbtx, tips, base, base_height)
  1075  
  1076  		//logger.Infof("tips %+v  base %s ",tips, base)
  1077  
  1078  		// we  only generate full order for the biggest tip
  1079  
  1080  		//gbl := Generate_Genesis_Block()
  1081  		// full_order := chain.Generate_Full_Order( bl.GetHash(), gbl.GetHash(), 0,0)
  1082  		//base_topo_index := chain.Load_Block_Topological_order(gbl.GetHash())
  1083  
  1084  		full_order := chain.Generate_Full_Order(dbtx, best, base, base_height, 0)
  1085  		base_topo_index := chain.Load_Block_Topological_order(dbtx, base)
  1086  
  1087  		highest_topo := int64(0)
  1088  
  1089  		// we must also run the client protocol in reverse to undo changes in already existing  order
  1090  
  1091  		// reverse the order
  1092  		// for i, j := 0, len(full_order)-1; i < j; i, j = i+1, j-1 {
  1093  		//     full_order[i], full_order[j] = full_order[j], full_order[i]
  1094  		// }
  1095  
  1096  		rlog.Infof("Full order %+v base %s base topo pos %d", full_order, base, base_topo_index)
  1097  
  1098  		last_topo_height := chain.Load_TOPO_HEIGHT(dbtx)
  1099  
  1100  		if len(bl.Tips) == 0 {
  1101  			base_topo_index = 0
  1102  		}
  1103  
  1104  		// run the client_protocol_reverse , till we reach the base block
  1105  		for last_topo_height > 0 {
  1106  			last_topo_block, err := chain.Load_Block_Topological_order_at_index(dbtx, last_topo_height)
  1107  
  1108  			if err != nil {
  1109  				logger.Warnf("Block not found while running client protocol in reverse %s, probably DB corruption", last_topo_block)
  1110  				return errormsg.ErrInvalidBlock, false
  1111  			}
  1112  
  1113  			bl_current, err := chain.Load_BL_FROM_ID(dbtx, last_topo_block)
  1114  			if err != nil {
  1115  				block_logger.Debugf("Cannot load block %s for client protocol reverse,probably DB corruption ", last_topo_block)
  1116  				return errormsg.ErrInvalidBlock, false
  1117  			}
  1118  
  1119  			rlog.Debugf("running client protocol in reverse for %s", last_topo_block)
  1120  
  1121  			chain.client_protocol_reverse(dbtx, bl_current, last_topo_block)
  1122  
  1123  			// run client protocol in reverse till we reach base
  1124  			if last_topo_block != full_order[0] {
  1125  				last_topo_height--
  1126  			} else {
  1127  				break
  1128  			}
  1129  		}
  1130  
  1131  		// TODO FIXME we must avoid reprocessing  base block and or duplicate blocks, no point in reprocessing it
  1132  		for i := int64(0); i < int64(len(full_order)); i++ {
  1133  
  1134  			chain.Store_Block_Topological_order(dbtx, full_order[i], i+base_topo_index)
  1135  			highest_topo = base_topo_index + i
  1136  
  1137  			rlog.Debugf("%d %s   topo_index %d  base topo %d", i, full_order[i], i+base_topo_index, base_topo_index)
  1138  
  1139  			// TODO we must run smart contracts and TXs in this order
  1140  			// basically client protocol must run here
  1141  			// even if the HF has triggered we may still accept, old blocks for some time
  1142  			// so hf is detected block-wise and processed as such
  1143  
  1144  			bl_current_hash := full_order[i]
  1145  			bl_current, err1 := chain.Load_BL_FROM_ID(dbtx, bl_current_hash)
  1146  			if err1 != nil {
  1147  				block_logger.Debugf("Cannot load block %s for client protocol,probably DB corruption", bl_current_hash)
  1148  				return errormsg.ErrInvalidBlock, false
  1149  			}
  1150  
  1151  			height_current := chain.Calculate_Height_At_Tips(dbtx, bl_current.Tips)
  1152  			hard_fork_version_current := chain.Get_Current_Version_at_Height(height_current)
  1153  
  1154  			//  run full client protocol and find valid transactions
  1155  			// find all transactions within this block which are NOT double-spend
  1156  			// if any double-SPEND are found ignore them, else collect their fees to give to miner
  1157  			total_fees := chain.client_protocol(dbtx, bl_current, bl_current_hash, height_current, highest_topo)
  1158  
  1159  			rlog.Debugf("running client protocol for %s minertx %s  topo %d", bl_current_hash, bl_current.Miner_TX.GetHash(), highest_topo)
  1160  
  1161  			// store and parse miner tx
  1162  			chain.Store_TX(dbtx, &bl_current.Miner_TX)
  1163  			chain.Store_TX_Height(dbtx, bl_current.Miner_TX.GetHash(), highest_topo)
  1164  
  1165  			// mark TX found in this block also  for explorer
  1166  			chain.store_TX_in_Block(dbtx, bl_current_hash, bl_current.Miner_TX.GetHash())
  1167  
  1168  			//mark tx found in this block is valid
  1169  			chain.mark_TX(dbtx, bl_current_hash, bl_current.Miner_TX.GetHash(), true)
  1170  
  1171  			// hard fork version is used to import transactions from earlier version of DERO chain
  1172  			// in order to keep things simple, the earlier emission/fees calculation/dynamic block size has been discarded
  1173  			// due to above reasons miner TX from the earlier could NOT be verified
  1174  			// emission calculations/ total supply should NOT change when importing earlier chain
  1175  			if hard_fork_version_current == 1 {
  1176  
  1177  				// store total  reward
  1178  				dbtx.StoreUint64(BLOCKCHAIN_UNIVERSE, GALAXY_BLOCK, bl_current_hash[:], PLANET_MINERTX_REWARD, bl_current.Miner_TX.Vout[0].Amount)
  1179  
  1180  				// store base reward
  1181  				dbtx.StoreUint64(BLOCKCHAIN_UNIVERSE, GALAXY_BLOCK, bl_current_hash[:], PLANET_BASEREWARD, bl_current.Miner_TX.Vout[0].Amount)
  1182  
  1183  				// store total generated coins
  1184  				// this is hardcoded at initial chain import, keeping original emission schedule
  1185  				if globals.IsMainnet(){
  1186  						dbtx.StoreUint64(BLOCKCHAIN_UNIVERSE, GALAXY_BLOCK, bl_current_hash[:], PLANET_ALREADY_GENERATED_COINS, config.MAINNET_HARDFORK_1_TOTAL_SUPPLY)
  1187  					}else{
  1188  						dbtx.StoreUint64(BLOCKCHAIN_UNIVERSE, GALAXY_BLOCK, bl_current_hash[:], PLANET_ALREADY_GENERATED_COINS, config.TESTNET_HARDFORK_1_TOTAL_SUPPLY)
  1189  					}
  1190  				
  1191  
  1192  			} else { //  hf 2 or later generate miner TX rewards as per client protocol
  1193  
  1194  				past_coins_generated := chain.Load_Already_Generated_Coins_for_Topo_Index(dbtx, highest_topo-1)
  1195  
  1196  				base_reward := emission.GetBlockReward_Atlantis(hard_fork_version_current, past_coins_generated)
  1197  
  1198  				// base reward is only 90%, rest 10 % is pushed back
  1199  				if globals.IsMainnet(){
  1200  					base_reward = (base_reward * 9) / 10
  1201  				}
  1202  
  1203  				// lower reward for byzantine behaviour
  1204  				// for as many block as added
  1205  				if chain.isblock_SideBlock(dbtx, bl_current_hash, highest_topo) { // lost race (or byzantine behaviour)
  1206                                      if  hard_fork_version_current == 2 {
  1207  					base_reward = (base_reward * 67) / 100 // give only 67 % reward
  1208                                      }else{
  1209                                          base_reward = (base_reward * 8) / 100 // give only 8 % reward
  1210                                      }
  1211  				}
  1212  
  1213  				// logger.Infof("past coins generated %d base reward %d", past_coins_generated, base_reward)
  1214  
  1215  				// the total reward must be given to the miner TX, since it contains 0, we patch only the output
  1216  				// and leave the original TX untouched
  1217  				total_reward := base_reward + total_fees
  1218  
  1219  				// store total  reward
  1220  				dbtx.StoreUint64(BLOCKCHAIN_UNIVERSE, GALAXY_BLOCK, bl_current_hash[:], PLANET_MINERTX_REWARD, total_reward)
  1221  
  1222  				// store base reward
  1223  				dbtx.StoreUint64(BLOCKCHAIN_UNIVERSE, GALAXY_BLOCK, bl_current_hash[:], PLANET_BASEREWARD, base_reward)
  1224  
  1225  				// store total generated coins
  1226  				dbtx.StoreUint64(BLOCKCHAIN_UNIVERSE, GALAXY_BLOCK, bl_current_hash[:], PLANET_ALREADY_GENERATED_COINS, past_coins_generated+base_reward)
  1227  
  1228  				//logger.Infof("base reward %s  total generated %s",globals.FormatMoney12(base_reward), globals.FormatMoney12(past_coins_generated+base_reward))
  1229  
  1230  			}
  1231  
  1232  			// TODO FIXME valid transactions must be found and their fees should be added as reward
  1233  
  1234  			// output index starts from the ending of the previous block
  1235  
  1236  			// get previous block
  1237  			output_index_start := int64(0)
  1238  			if (highest_topo - 1) >= 0 {
  1239  				previous_block, err1 := chain.Load_Block_Topological_order_at_index(dbtx, highest_topo-1)
  1240  				if err1 != nil {
  1241  					logger.Warnf("Errr could not find topo index of previous block")
  1242  					return errormsg.ErrInvalidBlock, false
  1243  				}
  1244  				// we will start where the previous block vouts ended
  1245  				_, output_index_start = chain.Get_Block_Output_Index(dbtx, previous_block)
  1246  			}
  1247  			if !chain.write_output_index(dbtx, bl_current_hash, output_index_start, hard_fork_version_current) {
  1248  				logger.Warnf("Since output index data cannot be wrritten, skipping block")
  1249  				return errormsg.ErrInvalidBlock, false
  1250  			}
  1251  
  1252  			// this tx must be stored, linked with this block
  1253  
  1254  		}
  1255  
  1256  		chain.Store_TOPO_HEIGHT(dbtx, int64(highest_topo))
  1257  
  1258  		// set main chain as new topo order
  1259  		// we must discard any rusty tips after they go stale
  1260  		best_height := int64(chain.Load_Height_for_BL_ID(dbtx, best))
  1261  
  1262  		new_tips := []crypto.Hash{}
  1263  		for i := range tips {
  1264  			rusty_tip_base_distance := chain.calculate_mainchain_distance(dbtx, tips[i])
  1265  			// tips of deviation > 6 will be rejected
  1266  			if (best_height - rusty_tip_base_distance) < (config.STABLE_LIMIT - 1) {
  1267  				new_tips = append(new_tips, tips[i])
  1268  
  1269  			} else { // this should be a rarest event, probably should never occur, until the network is under sever attack
  1270  				logger.Warnf("Rusty TIP declared stale %s  best height %d deviation %d rusty_tip %d", tips[i], best_height, (best_height - rusty_tip_base_distance), rusty_tip_base_distance)
  1271  				chain.transaction_scavenger(dbtx, tips[i]) // scavenge tx if possible
  1272  				// TODO we must include any TX from the orphan blocks back to the mempool to avoid losing any TX
  1273  			}
  1274  		}
  1275  
  1276  		// do more cleanup of tips for byzantine behaviour
  1277  		// this copy is necessary, otherwise data corruption occurs
  1278  		tips = append([]crypto.Hash{},new_tips...) 
  1279  		new_tips = new_tips[:0]
  1280  		best_tip := chain.find_best_tip_cumulative_difficulty(dbtx, tips)
  1281  		
  1282  		new_tips = append(new_tips, best_tip)
  1283  		for i := range tips {
  1284  			if best_tip != tips[i] {
  1285  				if !chain.validate_tips(dbtx, best_tip, tips[i]) { // reference is first
  1286  					logger.Warnf("Rusty tip %s declaring stale", tips[i])
  1287  					chain.transaction_scavenger(dbtx, tips[i]) // scavenge tx if possible
  1288  				} else {
  1289  					new_tips = append(new_tips, tips[i])
  1290  				}
  1291  			}
  1292  		}
  1293  
  1294  		rlog.Infof("New tips(after adding %s) %+v", bl.GetHash(), new_tips)
  1295  		chain.store_TIPS(dbtx, new_tips)
  1296  
  1297  	}
  1298  
  1299  	//chain.store_TIPS(chain.)
  1300  
  1301  	//chain.Top_ID = block_hash // set new top block id
  1302  
  1303  	// every 200 block print a line
  1304  	if chain.Get_Height()%200 == 0 {
  1305  		block_logger.Infof("Chain Height %d", chain.Height)
  1306  	}
  1307  
  1308  	result = true
  1309  
  1310  	// TODO fix hard fork
  1311  	// maintain hard fork votes to keep them SANE
  1312  	//chain.Recount_Votes() // does not return anything
  1313  
  1314  	// enable mempool book keeping
  1315  
  1316  	func() {
  1317  		if r := recover(); r != nil {
  1318  			logger.Warnf("Mempool House Keeping triggered panic height = %d", block_height)
  1319  		}
  1320  
  1321  		// discard the transactions from mempool if they are present there
  1322  		chain.Mempool.Monitor()
  1323  
  1324  		for i := 0; i < len(cbl.Txs); i++ {
  1325  			txid := cbl.Txs[i].GetHash()
  1326  			if chain.Mempool.Mempool_TX_Exist(txid) {
  1327  				rlog.Tracef(1, "Deleting TX from pool txid=%s", txid)
  1328  				chain.Mempool.Mempool_Delete_TX(txid)
  1329  			}
  1330  		}
  1331  
  1332  		// give mempool an opportunity to clean up tx, but only if they are not mined
  1333  		// but only check for double spending
  1334  		chain.Mempool.HouseKeeping(uint64(block_height), func(tx *transaction.Transaction) bool {
  1335  			return chain.Verify_Transaction_NonCoinbase_DoubleSpend_Check(dbtx, tx)
  1336  		})
  1337  	}()
  1338  
  1339  	return // run any handlers necesary to atomically
  1340  }
  1341  
  1342  // runs the client protocol which includes the following operations
  1343  // if any TX are being duplicate or double-spend ignore them
  1344  // mark all the valid transactions as valid
  1345  // mark all invalid transactions  as invalid
  1346  // calculate total fees based on valid TX
  1347  // we need NOT check ranges/ring signatures here, as they have been done already by earlier steps
  1348  func (chain *Blockchain) client_protocol(dbtx storage.DBTX, bl *block.Block, blid crypto.Hash, height int64, topoheight int64) (total_fees uint64) {
  1349  	// run client protocol for all TXs
  1350  	for i := range bl.Tx_hashes {
  1351  		tx, err := chain.Load_TX_FROM_ID(dbtx, bl.Tx_hashes[i])
  1352  		if err != nil {
  1353  			panic(fmt.Errorf("Cannot load  tx for %x err %s ", bl.Tx_hashes[i], err))
  1354  		}
  1355  		// mark TX found in this block also  for explorer
  1356  		chain.store_TX_in_Block(dbtx, blid, bl.Tx_hashes[i])
  1357  
  1358  		// check all key images as double spend, if double-spend detected mark invalid, else consider valid
  1359  		if chain.Verify_Transaction_NonCoinbase_DoubleSpend_Check(dbtx, tx) {
  1360  
  1361  			chain.consume_keyimages(dbtx, tx, height) // mark key images as consumed
  1362  			total_fees += tx.RctSignature.Get_TX_Fee()
  1363  
  1364  			chain.Store_TX_Height(dbtx, bl.Tx_hashes[i], topoheight) // link the tx with the topo height
  1365  
  1366  			//mark tx found in this block is valid
  1367  			chain.mark_TX(dbtx, blid, bl.Tx_hashes[i], true)
  1368  
  1369  		} else { // TX is double spend or reincluded by 2 blocks simultaneously
  1370  			rlog.Tracef(1,"Double spend TX is being ignored %s %s", blid, bl.Tx_hashes[i])
  1371  			chain.mark_TX(dbtx, blid, bl.Tx_hashes[i], false)
  1372  		}
  1373  	}
  1374  
  1375  	return total_fees
  1376  }
  1377  
  1378  // this undoes everything that is done by client protocol
  1379  // NOTE: this will have any effect, only if client protocol has been run on this block earlier
  1380  func (chain *Blockchain) client_protocol_reverse(dbtx storage.DBTX, bl *block.Block, blid crypto.Hash) {
  1381  	// run client protocol for all TXs
  1382  	for i := range bl.Tx_hashes {
  1383  		tx, err := chain.Load_TX_FROM_ID(dbtx, bl.Tx_hashes[i])
  1384  		if err != nil {
  1385  			panic(fmt.Errorf("Cannot load  tx for %x err %s ", bl.Tx_hashes[i], err))
  1386  		}
  1387  		// only the  valid TX must be revoked
  1388  		if chain.IS_TX_Valid(dbtx, blid, bl.Tx_hashes[i]) {
  1389  			chain.revoke_keyimages(dbtx, tx) // mark key images as not used
  1390  
  1391  			chain.Store_TX_Height(dbtx, bl.Tx_hashes[i], -1) // unlink the tx with the topo height
  1392  
  1393  			//mark tx found in this block is invalid
  1394  			chain.mark_TX(dbtx, blid, bl.Tx_hashes[i], false)
  1395  
  1396  		} else { // TX is double spend or reincluded by 2 blocks simultaneously
  1397  			// invalid tx is related
  1398  		}
  1399  	}
  1400  
  1401  	return
  1402  }
  1403  
  1404  // scavanger for transactions from rusty/stale tips to reinsert them into pool
  1405  func (chain *Blockchain) transaction_scavenger(dbtx storage.DBTX, blid crypto.Hash) {
  1406  	defer func() {
  1407  		if r := recover(); r != nil {
  1408  			logger.Warnf("Recovered while transaction scavenging, Stack trace below ")
  1409  			logger.Warnf("Stack trace  \n%s", debug.Stack())
  1410  		}
  1411  	}()
  1412  
  1413  	logger.Debugf("scavenging transactions from blid %s", blid)
  1414  	reachable_blocks := chain.BuildReachableBlocks(dbtx, []crypto.Hash{blid})
  1415  	reachable_blocks[blid] = true // add self
  1416  	for k, _ := range reachable_blocks {
  1417  		if chain.Is_Block_Orphan(k) {
  1418  			bl, err := chain.Load_BL_FROM_ID(dbtx, k)
  1419  			if err == nil {
  1420  				for i := range bl.Tx_hashes {
  1421  					tx, err := chain.Load_TX_FROM_ID(dbtx, bl.Tx_hashes[i])
  1422  					if err != nil {
  1423  						rlog.Warnf("err while scavenging blid %s  txid %s err %s", k, bl.Tx_hashes[i], err)
  1424  					} else {
  1425  						// add tx to pool, it will do whatever is necessarry
  1426  						chain.Add_TX_To_Pool(tx)
  1427  					}
  1428  				}
  1429  			} else {
  1430  				rlog.Warnf("err while scavenging blid %s err %s", k, err)
  1431  			}
  1432  		}
  1433  	}
  1434  }
  1435  
  1436  // Finds whether a  block is orphan
  1437  // since we donot store any fields, we need to calculate/find the block as orphan
  1438  // using an algorithm
  1439  // if the block is NOT topo ordered , it is orphan/stale
  1440  func (chain *Blockchain) Is_Block_Orphan(hash crypto.Hash) bool {
  1441  	return !chain.Is_Block_Topological_order(nil, hash)
  1442  }
  1443  
  1444  // this is used to find if a tx is orphan, YES orphan TX
  1445  // these can occur during  when they are detect to be double-spended on
  1446  // so the TX becomes orphan ( chances are less may be less that .000001 % but they are there)
  1447  // if a tx is not valid in any of the blocks, it has been mined it is orphan
  1448  func (chain *Blockchain) Is_TX_Orphan(hash crypto.Hash) (result bool) {
  1449  	blocks := chain.Load_TX_blocks(nil, hash)
  1450  	for i := range blocks {
  1451  		if chain.IS_TX_Valid(nil, blocks[i], hash) && chain.Is_Block_Topological_order(nil, blocks[i]) {
  1452  			return false
  1453  		}
  1454  
  1455  	}
  1456  
  1457  	return true
  1458  
  1459  }
  1460  
  1461  // this is used to for quick syncs as entire blocks as SHA1,
  1462  // entires block can skipped for verification, if checksum matches what the devs have stored
  1463  func (chain *Blockchain) BlockCheckSum(cbl *block.Complete_Block) []byte {
  1464  	h := sha3.New256()
  1465  	h.Write(cbl.Bl.Serialize())
  1466  	for i := range cbl.Txs {
  1467  		h.Write(cbl.Txs[i].Serialize())
  1468  	}
  1469  	return h.Sum(nil)
  1470  }
  1471  
  1472  // this function will mark all the key images present in the tx as requested
  1473  // this is done so as they cannot be respent
  1474  // mark is int64  height
  1475  func (chain *Blockchain) mark_keyimages(dbtx storage.DBTX, tx *transaction.Transaction, height int64) bool {
  1476  	// mark keyimage as spent
  1477  	for i := 0; i < len(tx.Vin); i++ {
  1478  		k_image := tx.Vin[i].(transaction.Txin_to_key).K_image
  1479  		chain.Store_KeyImage(dbtx, crypto.Hash(k_image), height)
  1480  	}
  1481  	return true
  1482  }
  1483  
//this will mark all the keyimages present in this TX as spent
//this is done so as an input cannot be spent twice
// Thin wrapper over mark_keyimages: height records where the images were consumed.
func (chain *Blockchain) consume_keyimages(dbtx storage.DBTX, tx *transaction.Transaction, height int64) bool {
	return chain.mark_keyimages(dbtx, tx, height)
}
  1489  
//this will mark all the keyimages present in this TX as unspent
//this is required during  client protocol runs
// Thin wrapper over mark_keyimages using the sentinel height -1.
func (chain *Blockchain) revoke_keyimages(dbtx storage.DBTX, tx *transaction.Transaction) bool {
	return chain.mark_keyimages(dbtx, tx, -1) // -1 is a marker that stored key-image is not valid
}
  1495  
  1496  /* this will only give you access to transactions which have been mined
  1497   */
  1498  func (chain *Blockchain) Get_TX(dbtx storage.DBTX, hash crypto.Hash) (*transaction.Transaction, error) {
  1499  	tx, err := chain.Load_TX_FROM_ID(dbtx, hash)
  1500  
  1501  	return tx, err
  1502  }
  1503  
  1504  // This will get the biggest height of tip for hardfork version and other calculations
  1505  // get biggest height of parent, add 1
  1506  func (chain *Blockchain) Calculate_Height_At_Tips(dbtx storage.DBTX, tips []crypto.Hash) int64 {
  1507  	height := int64(0)
  1508  	if len(tips) == 0 { // genesis block has no parent
  1509  
  1510  	} else { // find the best height of past
  1511  		for i := range tips {
  1512  			past_height := chain.Load_Height_for_BL_ID(dbtx, tips[i])
  1513  			if height <= past_height {
  1514  				height = past_height
  1515  			}
  1516  		}
  1517  		height++
  1518  	}
  1519  	return height
  1520  
  1521  }
  1522  
  1523  // this function return the current top block, if we start at specific block
  1524  // this works for any blocks which were added
  1525  /*
  1526  func (chain *Blockchain) Get_Top_Block(block_id crypto.Hash) crypto.Hash {
  1527  	for {
  1528  		// check if the block has child, if not , we are the top
  1529  		if !chain.Does_Block_Have_Child(block_id) {
  1530  			return block_id
  1531  		}
  1532  		block_id = chain.Load_Block_Child(block_id) // continue searching the new top
  1533  	}
  1534  	//	panic("We can never reach this point")
  1535  	//	return block_id // we will never reach here
  1536  }
  1537  */
  1538  
  1539  // verifies whether we are lagging
  1540  // return true if we need resync
  1541  // returns false if we are good and resync is not required
  1542  func (chain *Blockchain) IsLagging(peer_cdiff *big.Int) bool {
  1543  
  1544  	our_diff := new(big.Int).SetInt64(0)
  1545  
  1546  	high_block, err := chain.Load_Block_Topological_order_at_index(nil, chain.Load_TOPO_HEIGHT(nil))
  1547  	if err != nil {
  1548  		return false
  1549  	} else {
  1550  		our_diff = chain.Load_Block_Cumulative_Difficulty(nil, high_block)
  1551  	}
  1552  	rlog.Tracef(2, "P_cdiff %s cdiff %d  our top block %s", peer_cdiff.String(), our_diff.String(), high_block)
  1553  
  1554  	if our_diff.Cmp(peer_cdiff) < 0 {
  1555  		return true // peer's cumulative difficulty is more than ours , active resync
  1556  	}
  1557  	return false
  1558  }
  1559  
// This function will expand a transaction with all the missing info being reconstitued from the blockchain
// this also increases security since data is coming from the chain or being calculated
// basically this places data for ring signature verification
// REMEMBER to expand key images from the blockchain
// TODO we must enforce that the keyimages used are valid and specific outputs are unlocked
//
// Returns true only when every ring member could be loaded, every input is
// mature (for hf_version >= 2) and no duplicate destinations/masks were seen.
// On any failure the tx may be left partially expanded and must be discarded.
// Panics if called on a non-v2 transaction.
func (chain *Blockchain) Expand_Transaction_v2(dbtx storage.DBTX, hf_version int64, tx *transaction.Transaction) (result bool) {

	result = false
	if tx.Version != 2 {
		panic("TX not version 2")
	}

	chain_height_cached := uint64(chain.Get_Height()) // do it once instead of in for loop

	//if rctsignature is null

	// fill up the message hash first
	tx.RctSignature.Message = crypto.Key(tx.GetPrefixHash())

	// fill up the key images from the blockchain
	for i := 0; i < len(tx.Vin); i++ {
		tx.RctSignature.MlsagSigs[i].II = tx.RctSignature.MlsagSigs[i].II[:0] // zero it out
		tx.RctSignature.MlsagSigs[i].II = make([]crypto.Key, 1, 1)
		tx.RctSignature.MlsagSigs[i].II[0] = crypto.Key(tx.Vin[i].(transaction.Txin_to_key).K_image)
	}

	// now we need to fill up the mixring ctkey
	// one part is the destination address, second is the commitment mask from the outpk
	// mixring is stored in different ways for rctfull and simple

	switch tx.RctSignature.Get_Sig_Type() {

	case ringct.RCTTypeFull:
		// TODO, we need to make sure all ring are of same size

		if len(tx.Vin) > 1 {
			panic("unsupported ringct full case please investigate")
		}

		// make a matrix of mixin x 1 elements
		mixin := len(tx.Vin[0].(transaction.Txin_to_key).Key_offsets)
		tx.RctSignature.MixRing = make([][]ringct.CtKey, mixin, mixin)
		for n := 0; n < len(tx.Vin); n++ {
			offset := uint64(0)
			for m := 0; m < len(tx.Vin[n].(transaction.Txin_to_key).Key_offsets); m++ {
				tx.RctSignature.MixRing[m] = make([]ringct.CtKey, len(tx.Vin), len(tx.Vin))

				// key offsets are delta encoded; accumulate to get the absolute global output index
				offset += tx.Vin[n].(transaction.Txin_to_key).Key_offsets[m]
				// extract the keys from specific offset
				offset_data, success := chain.load_output_index(dbtx, offset)

				if !success {
					return false
				}

				// check maturity of inputs
				if hf_version >= 2 && !inputmaturity.Is_Input_Mature(chain_height_cached, offset_data.Height, offset_data.Unlock_Height, offset_data.SigType) {
					rlog.Tracef(1, "transaction using immature inputs from block %d chain height %d", offset_data.Height, chain_height_cached)
					return false
				}

				tx.RctSignature.MixRing[m][n].Destination = offset_data.InKey.Destination
				tx.RctSignature.MixRing[m][n].Mask = offset_data.InKey.Mask
				//	fmt.Printf("%d %d dest %s\n",n,m, offset_data.InKey.Destination)
				//	fmt.Printf("%d %d mask %s\n",n,m, offset_data.InKey.Mask)

			}
		}

	case ringct.RCTTypeSimple, ringct.RCTTypeSimpleBulletproof:
		mixin := len(tx.Vin[0].(transaction.Txin_to_key).Key_offsets)
		_ = mixin
		// simple variants store one ring (row) per input
		tx.RctSignature.MixRing = make([][]ringct.CtKey, len(tx.Vin), len(tx.Vin))

		for n := 0; n < len(tx.Vin); n++ {

			tx.RctSignature.MixRing[n] = make([]ringct.CtKey, len(tx.Vin[n].(transaction.Txin_to_key).Key_offsets),
				len(tx.Vin[n].(transaction.Txin_to_key).Key_offsets))
			offset := uint64(0)

			// this test is being done keeping future in mind
			// (rejects rings that reuse a destination key or commitment mask)
			duplicate_destination_mask := map[crypto.Key]bool{}
			for m := 0; m < len(tx.Vin[n].(transaction.Txin_to_key).Key_offsets); m++ {

				// key offsets are delta encoded; accumulate to get the absolute global output index
				offset += tx.Vin[n].(transaction.Txin_to_key).Key_offsets[m]

				// extract the keys from specific offset
				offset_data, success := chain.load_output_index(dbtx, offset)

				if !success {
					return false
				}

				//logger.Infof("Ring member %+v", offset_data)

				//logger.Infof("cheight %d  ring member height %d  locked height %d sigtype %d", chain.Get_Height(), offset_data.Height, offset_data.Unlock_Height, 1 )

				//logger.Infof("mature %+v  tx %s hf_version %d", inputmaturity.Is_Input_Mature(uint64(chain.Get_Height()), offset_data.Height, offset_data.Unlock_Height, 1), tx.GetHash(), hf_version)

				// check maturity of inputs
				if hf_version >= 2 && !inputmaturity.Is_Input_Mature(chain_height_cached, offset_data.Height, offset_data.Unlock_Height, offset_data.SigType) {
					rlog.Tracef(1, "transaction using immature inputs from block %d chain height %d", offset_data.Height, chain_height_cached)
					return false
				}
				if _, ok := duplicate_destination_mask[offset_data.InKey.Destination]; ok {
					rlog.Warnf("Duplicate Keys tx hash %s", tx.GetHash())
					return false
				}
				if _, ok := duplicate_destination_mask[offset_data.InKey.Mask]; ok {
					rlog.Warnf("Duplicate Masks %s", tx.GetHash())
					return false
				}

				duplicate_destination_mask[offset_data.InKey.Destination] = true
				duplicate_destination_mask[offset_data.InKey.Mask] = true

				tx.RctSignature.MixRing[n][m].Destination = offset_data.InKey.Destination
				tx.RctSignature.MixRing[n][m].Mask = offset_data.InKey.Mask
				//	fmt.Printf("%d %d dest %s\n",n,m, offset_data.InKey.Destination)
				//	fmt.Printf("%d %d mask %s\n",n,m, offset_data.InKey.Mask)

			}
		}

	default:
		logger.Warnf("unknown ringct transaction")
		return false
	}

	return true
}
  1691  
  1692  // this function count all the vouts of the block,
  1693  // this function exists here because  only the chain knows the tx
  1694  func (chain *Blockchain) Block_Count_Vout(dbtx storage.DBTX, block_hash crypto.Hash) (count uint64) {
  1695  	count = 1 // miner tx is always present
  1696  
  1697  	bl, err := chain.Load_BL_FROM_ID(dbtx, block_hash)
  1698  
  1699  	if err != nil {
  1700  		panic(fmt.Errorf("Cannot load  block for %s err %s", block_hash, err))
  1701  	}
  1702  
  1703  	for i := 0; i < len(bl.Tx_hashes); i++ { // load all tx one by one
  1704  		tx, err := chain.Load_TX_FROM_ID(dbtx, bl.Tx_hashes[i])
  1705  		if err != nil {
  1706  			panic(fmt.Errorf("Cannot load  tx for %s err %s", bl.Tx_hashes[i], err))
  1707  		}
  1708  
  1709  		// tx has been loaded, now lets get the vout
  1710  		vout_count := uint64(len(tx.Vout))
  1711  		count += vout_count
  1712  	}
  1713  	return count
  1714  }
  1715  
  1716  // tells whether the hash already exists in slice
  1717  func sliceExists(slice []crypto.Hash, hash crypto.Hash) bool {
  1718  	for i := range slice {
  1719  		if slice[i] == hash {
  1720  			return true
  1721  		}
  1722  	}
  1723  	return false
  1724  }
  1725  
// this function will rewind the chain from the topo height one block at a time
// this function also runs the client protocol in reverse and also deletes the block from the storage
//
// All work happens inside a single writable DB transaction: it is committed
// only when the whole rewind succeeds, otherwise rolled back, so a partial
// rewind never hits disk. Rewinding continues past rewind_count until a
// previously-detected stable (sync-block) topoheight is reached.
func (chain *Blockchain) Rewind_Chain(rewind_count int) (result bool) {
	chain.Lock()
	defer chain.Unlock()

	dbtx, err := chain.store.BeginTX(true)
	if err != nil {
		logger.Warnf("Could NOT rewind chain. Error opening writable TX, err %s", err)
		return false
	}

	defer func() {
		// safety so if anything wrong happens, verification fails
		if r := recover(); r != nil {
			logger.Warnf("Recovered while rewinding chain, Stack trace below block_hash ")
			logger.Warnf("Stack trace  \n%s", debug.Stack())
			result = false
		}

		if result == true { // block was successfully added, commit it atomically
			dbtx.Commit()
			dbtx.Sync() // sync the DB to disk after every execution of this function
		} else {
			dbtx.Rollback() // if block could not be added, rollback all changes to previous block
		}
	}()

	// we must always rewind till a safety point is found
	// keys are topoheights judged safe to stop at
	stable_points := map[int64]bool{}

	// we must till we reach a safe point
	// safe point is point where a single block exists at specific height
	// this may lead us to rewinding a it more
	//safe := false

	// TODO we must fix safeness using the stable calculation

	// keep rewinding till safe point is reached
	for done := 0; ; done++ {
		top_block_topo_index := chain.Load_TOPO_HEIGHT(dbtx)

		//logger.Infof("stable points %d", len(stable_points))
		// keep rewinding till a safe point is not found
		// (only allowed to stop once the requested count has been rewound)
		if done >= rewind_count {
			if _, ok := stable_points[top_block_topo_index]; ok {
				break
			}
		}

		if top_block_topo_index < 1 {
			logger.Warnf("Cannot rewind  genesis block  topoheight %d", top_block_topo_index)
			return false
		}

		// check last 100 blocks for safety
		// (loop actually scans up to 50 entries, staying above topoheight 5)
		for i := int64(0); i < 50 && (top_block_topo_index-i >= 5); i++ {
			hash, err := chain.Load_Block_Topological_order_at_index(dbtx, top_block_topo_index-i)
			if err != nil {
				logger.Warnf("Cannot rewind chain at topoheight %d err: %s", top_block_topo_index, err)
				return false
			}

			// TODO add a check whether a single block exists at this height,
			// if yes consider it as a sync block
			h := chain.Load_Height_for_BL_ID(dbtx, hash)

			if len(chain.Get_Blocks_At_Height(dbtx, h)) != 1 { // we should have exactly 1 block at this height
				continue
			}

			if chain.IsBlockSyncBlockHeight(dbtx, hash) {
				stable_points[top_block_topo_index-i] = true
			}
		}

		blid, err := chain.Load_Block_Topological_order_at_index(dbtx, top_block_topo_index)
		if err != nil {
			logger.Warnf("Cannot rewind chain at topoheight %d err: %s", top_block_topo_index, err)
			return false
		}

		// rewind every block that shares the top block's height, not just the top block
		blocks_at_height := chain.Get_Blocks_At_Height(dbtx, chain.Load_Height_for_BL_ID(dbtx, blid))

		for _, blid := range blocks_at_height {
			// run the client protocol in reverse to undo keyimages

			bl_current, err := chain.Load_BL_FROM_ID(dbtx, blid)
			if err != nil {
				logger.Warnf("Cannot load block %s for client protocol reverse ", blid)
				return false
			}

			logger.Debugf("running client protocol in reverse for %s", blid)
			// run client protocol in reverse
			chain.client_protocol_reverse(dbtx, bl_current, blid)

			// delete the tip
			tips := chain.load_TIPS(dbtx)
			new_tips := []crypto.Hash{}

			for i := range tips {
				if tips[i] != blid {
					new_tips = append(new_tips, tips[i])
				}
			}

			// all the tips consumed by this block become the new tips
			for i := range bl_current.Tips {
				new_tips = append(new_tips, bl_current.Tips[i])
			}

			chain.store_TIPS(dbtx, new_tips) // store updated tips, we should rank and store them

			// erase the block blob and drop it from the topological order (-1)
			dbtx.StoreObject(BLOCKCHAIN_UNIVERSE, GALAXY_BLOCK, blid[:], PLANET_BLOB, []byte(""))
			chain.Store_Block_Topological_order(dbtx, blid, -1)

		}

		// height of previous block becomes new height
		old_height := chain.Load_Height_for_BL_ID(dbtx, blid)
		chain.Store_TOP_HEIGHT(dbtx, old_height-1)

		tmp_blocks_at_height := chain.Get_Blocks_At_Height(dbtx, old_height-1)

		/*
			// we must unwind till the safe point is reached
			if len(tmp_blocks_at_height) == 1 && done >= rewind_count  && safe == false{
				rlog.Infof("Safety reached")
				safe = true
			}

			if len(tmp_blocks_at_height) != 1 && done >= rewind_count  && safe == false{
				rlog.Infof("Safety not reached rewinding more")
			}
		*/

		// this avoid possible database corruption by multiple blocks at same height

		// NOTE(review): if several ordered blocks exist at old_height-1 this keeps the
		// topo index of the last one scanned — presumably they share it; TODO confirm
		lowest_positive_topo := int64(0)
		for _, blid := range tmp_blocks_at_height {
			if chain.Is_Block_Topological_order(dbtx, blid) {
				lowest_positive_topo = chain.Load_Block_Topological_order(dbtx, blid)
			}
		}
		chain.Store_TOPO_HEIGHT(dbtx, lowest_positive_topo)

		// no more blocks are stored at this height clean them
		dbtx.StoreObject(BLOCKCHAIN_UNIVERSE, GALAXY_HEIGHT, PLANET_HEIGHT, itob(uint64(old_height)), []byte{})

	}
	rlog.Infof("height after rewind %d", chain.Load_TOPO_HEIGHT(dbtx))

	return true
}
  1881  
  1882  
  1883  // build reachability graph upto 2*config deeps to answer reachability queries
  1884  func (chain *Blockchain) buildReachability_internal(dbtx storage.DBTX, reachmap map[crypto.Hash]bool, blid crypto.Hash, level int) {
  1885  	past := chain.Get_Block_Past(dbtx, blid)
  1886  	reachmap[blid] = true // add self to reach map
  1887  
  1888  	if level >= int(2*config.STABLE_LIMIT) { // stop recursion must be more than  checks in add complete block
  1889  		return
  1890  	}
  1891  	for i := range past { // if no past == genesis return
  1892  		if _, ok := reachmap[past[i]]; !ok { // process a node, only if has not been processed earlier
  1893  			chain.buildReachability_internal(dbtx, reachmap, past[i], level+1)
  1894  		}
  1895  	}
  1896  
  1897  }
  1898  
  1899  // build reachability graph upto 2*limit  deeps to answer reachability queries
  1900  func (chain *Blockchain) buildReachability(dbtx storage.DBTX, blid crypto.Hash) map[crypto.Hash]bool {
  1901  	reachmap := map[crypto.Hash]bool{}
  1902  	chain.buildReachability_internal(dbtx, reachmap, blid, 0)
  1903  	return reachmap
  1904  }
  1905  
  1906  // this is part of consensus rule, 2 tips cannot refer to their common parent
  1907  func (chain *Blockchain) VerifyNonReachability(dbtx storage.DBTX, bl *block.Block) bool {
  1908  
  1909  	reachmaps := make([]map[crypto.Hash]bool, len(bl.Tips), len(bl.Tips))
  1910  	for i := range bl.Tips {
  1911  		reachmaps[i] = chain.buildReachability(dbtx, bl.Tips[i])
  1912  	}
  1913  
  1914  	// bruteforce all reachability combinations, max possible 3x3 = 9 combinations
  1915  	for i := range bl.Tips {
  1916  		for j := range bl.Tips {
  1917  			if i == j { // avoid self test
  1918  				continue
  1919  			}
  1920  
  1921  			if _, ok := reachmaps[j][bl.Tips[i]]; ok { // if a tip can be referenced as another's past, this is not a tip , probably malicious, discard block
  1922  				return false
  1923  			}
  1924  
  1925  		}
  1926  	}
  1927  
  1928  	return true
  1929  }
  1930  
  1931  // used in the difficulty calculation for consensus and while scavenging
  1932  func (chain *Blockchain) BuildReachableBlocks(dbtx storage.DBTX, tips []crypto.Hash) map[crypto.Hash]bool {
  1933  	reachblocks := map[crypto.Hash]bool{} // contains a list of all reachable blocks
  1934  	for i := range tips {
  1935  		reachmap := chain.buildReachability(dbtx, tips[i])
  1936  		for k, _ := range reachmap {
  1937  			reachblocks[k] = true // build unique block list
  1938  		}
  1939  	}
  1940  	return reachblocks
  1941  }
  1942  
  1943  // this is part of consensus rule, reachable blocks cannot have keyimages collision with new blocks
  1944  // this is to avoid dishonest miners including dead transactions
  1945  //
  1946  func (chain *Blockchain) BuildReachabilityKeyImages(dbtx storage.DBTX, bl *block.Block) map[crypto.Hash]bool {
  1947  
  1948  	Keyimage_reach_map := map[crypto.Hash]bool{}
  1949  
  1950  	reachblocks := map[crypto.Hash]bool{} // contains a list of all reachable blocks
  1951  	for i := range bl.Tips {
  1952  		reachmap := chain.buildReachability(dbtx, bl.Tips[i])
  1953  		for k, _ := range reachmap {
  1954  			reachblocks[k] = true // build unique block list
  1955  		}
  1956  	}
  1957  
  1958  	// load all blocks and process their TX as per client protocol
  1959  	for blid, _ := range reachblocks {
  1960  
  1961  		bl, err := chain.Load_BL_FROM_ID(dbtx, blid)
  1962  		if err != nil {
  1963  			panic(fmt.Errorf("Cannot load  block for %s err %s", blid, err))
  1964  		}
  1965  
  1966  		for i := 0; i < len(bl.Tx_hashes); i++ { // load all tx one by one, skipping as per client_protocol
  1967  
  1968  			/*
  1969  				if !chain.IS_TX_Valid(dbtx, blid, bl.Tx_hashes[i]) { // skip invalid TX
  1970  					rlog.Tracef(1, "bl %s tx %s ignored while building key image reachability as per client protocol")
  1971  					continue
  1972  				}
  1973  			*/
  1974  
  1975  			tx, err := chain.Load_TX_FROM_ID(dbtx, bl.Tx_hashes[i])
  1976  			if err != nil {
  1977  				panic(fmt.Errorf("Cannot load  tx for %s err %s", bl.Tx_hashes[i], err))
  1978  			}
  1979  
  1980  			// tx has been loaded, now lets get all the key images
  1981  			for i := 0; i < len(tx.Vin); i++ {
  1982  				Keyimage_reach_map[tx.Vin[i].(transaction.Txin_to_key).K_image] = true // add element to map for next check
  1983  			}
  1984  		}
  1985  	}
  1986  	return Keyimage_reach_map
  1987  }
  1988  
  1989  // sync blocks have the following specific property
  1990  // 1) the block is singleton at this height
  1991  // basically the condition allow us to confirm weight of future blocks with reference to sync blocks
  1992  // these are the one who settle the chain and guarantee it
  1993  func (chain *Blockchain) IsBlockSyncBlockHeight(dbtx storage.DBTX, blid crypto.Hash) bool {
  1994  
  1995  	// TODO make sure that block exist
  1996  	height := chain.Load_Height_for_BL_ID(dbtx, blid)
  1997  	if height == 0 { // genesis is always a sync block
  1998  		return true
  1999  	}
  2000  
  2001  	//  top blocks are always considered unstable
  2002  	if (height + config.STABLE_LIMIT) > chain.Get_Height() {
  2003  		return false
  2004  	}
  2005  
  2006  	// if block is not ordered, it can never be sync block
  2007  	if !chain.Is_Block_Topological_order(dbtx, blid) {
  2008  		return false
  2009  	}
  2010  
  2011  	blocks := chain.Get_Blocks_At_Height(dbtx, height)
  2012  
  2013  	if len(blocks) == 0 && height != 0 { // this  should NOT occur
  2014  		panic("No block exists at this height, not possible")
  2015  	}
  2016  
  2017  	
  2018  	//   if len(blocks) == 1 { //  ideal blockchain case, it is a sync block
  2019  	//       return true
  2020  	//   }
  2021  	
  2022  
  2023  	// check whether single block exists in the TOPO order index, if no we are NOT a sync block
  2024  
  2025  	// we are here means we have one oor more block
  2026  	blocks_in_main_chain := 0
  2027  	for i := range blocks {
  2028  		if chain.Is_Block_Topological_order(dbtx, blocks[i]) {
  2029  			blocks_in_main_chain++
  2030  			if blocks_in_main_chain >= 2 {
  2031  				return false
  2032  			}
  2033  		}
  2034  	}
  2035  
  2036  	// we are here if we only have one block in topological order, others are  dumped/rejected blocks
  2037  
  2038  	// collect all blocks of past LIMIT heights
  2039  	var preblocks []crypto.Hash
  2040  	for i := height - 1; i >= (height-config.STABLE_LIMIT) && i != 0; i-- {
  2041  		blocks := chain.Get_Blocks_At_Height(dbtx, i)
  2042  		for j := range blocks { //TODO BUG BUG BUG we need to make sure only main chain blocks are considered
  2043  			preblocks = append(preblocks, blocks[j])
  2044  		}
  2045  	}
  2046  
  2047  	// we need to find a common base to compare them, otherwise comparision is futile  due to duplication
  2048  	sync_block_cumulative_difficulty := chain.Load_Block_Cumulative_Difficulty(dbtx, blid) //+ chain.Load_Block_Difficulty(blid)
  2049  
  2050  	// if any of the blocks  has a cumulative difficulty  more than  sync block, this situation affects  consensus, so mitigate it
  2051  	for i := range preblocks {
  2052  		cumulative_difficulty := chain.Load_Block_Cumulative_Difficulty(dbtx, preblocks[i]) // + chain.Load_Block_Difficulty(preblocks[i])
  2053  
  2054  		//if cumulative_difficulty >= sync_block_cumulative_difficulty {
  2055  		if cumulative_difficulty.Cmp(sync_block_cumulative_difficulty) >= 0 {
  2056  			rlog.Warnf("Mitigating CONSENSUS issue on block %s height %d  child %s cdiff %d sync block cdiff %d", blid, height, preblocks[i], cumulative_difficulty, sync_block_cumulative_difficulty)
  2057  			return false
  2058  		}
  2059  
  2060  	}
  2061  
  2062  	return true
  2063  }
  2064  
// IsBlockSyncBlockHeightSpecific reports whether blid qualifies as a sync
// block when the chain is considered to be at chain_height. A sync block must
// be topologically ordered, buried at least STABLE_LIMIT below chain_height,
// be the only ordered block at its height, and have a cumulative difficulty
// strictly greater than every block in the preceding STABLE_LIMIT heights.
func (chain *Blockchain) IsBlockSyncBlockHeightSpecific(dbtx storage.DBTX, blid crypto.Hash, chain_height int64) bool {

	// TODO make sure that block exist
	height := chain.Load_Height_for_BL_ID(dbtx, blid)
	if height == 0 { // genesis is always a sync block
		return true
	}

	//  top blocks are always considered unstable
	if (height + config.STABLE_LIMIT) > chain_height {
		return false
	}

	// if block is not ordered, it can never be sync block
	if !chain.Is_Block_Topological_order(dbtx, blid) {
		return false
	}

	blocks := chain.Get_Blocks_At_Height(dbtx, height)

	if len(blocks) == 0 && height != 0 { // this  should NOT occur
		panic("No block exists at this height, not possible")
	}

	//   if len(blocks) == 1 { //  ideal blockchain case, it is a sync block
	//       return true
	//   }

	// check whether single block exists in the TOPO order index, if no we are NOT a sync block

	// we are here means we have one oor more block
	blocks_in_main_chain := 0
	for i := range blocks {
		if chain.Is_Block_Topological_order(dbtx, blocks[i]) {
			blocks_in_main_chain++
			if blocks_in_main_chain >= 2 {
				return false
			}
		}
	}

	// we are here if we only have one block in topological order, others are  dumped/rejected blocks

	// collect all blocks of past LIMIT heights
	var preblocks []crypto.Hash
	for i := height - 1; i >= (height-config.STABLE_LIMIT) && i != 0; i-- {
		blocks := chain.Get_Blocks_At_Height(dbtx, i)
		for j := range blocks { //TODO BUG BUG BUG we need to make sure only main chain blocks are considered
			preblocks = append(preblocks, blocks[j])
		}
	}

	// we need to find a common base to compare them, otherwise comparision is futile  due to duplication
	sync_block_cumulative_difficulty := chain.Load_Block_Cumulative_Difficulty(dbtx, blid) //+ chain.Load_Block_Difficulty(blid)

	// if any of the blocks  has a cumulative difficulty  more than  sync block, this situation affects  consensus, so mitigate it
	for i := range preblocks {
		cumulative_difficulty := chain.Load_Block_Cumulative_Difficulty(dbtx, preblocks[i]) // + chain.Load_Block_Difficulty(preblocks[i])

		//if cumulative_difficulty >= sync_block_cumulative_difficulty {
		if cumulative_difficulty.Cmp(sync_block_cumulative_difficulty) >= 0 {
			rlog.Warnf("Mitigating CONSENSUS issue on block %s height %d  child %s cdiff %d sync block cdiff %d", blid, height, preblocks[i], cumulative_difficulty, sync_block_cumulative_difficulty)
			return false
		}

	}

	return true
}
  2136  
  2137  
  2138  // key is string of blid and appendded chain height
  2139  var tipbase_cache,_ = hashicorp_lru.New(10240)
  2140  
// base of a tip is last known sync point
// weight of bases in mentioned in term of height
// this must not employ any cache
// NOTE(review): despite the comment above, results ARE memoized in
// tipbase_cache keyed on (blid, chain_height) — presumably safe because both
// inputs fully determine the answer; confirm against callers.
func (chain *Blockchain) FindTipBase(dbtx storage.DBTX, blid crypto.Hash, chain_height int64) (bs BlockScore) {

	// see if cache contains it
	if bsi,ok := tipbase_cache.Get(fmt.Sprintf("%s%d", blid,chain_height));ok{
		bs = bsi.(BlockScore)
		return bs
	}

	// named return + defer: whatever bs ends up holding is written to the cache
	defer func(){ // capture return value of bs to cache
		z := bs
		tipbase_cache.Add(fmt.Sprintf("%s%d", blid,chain_height),z)
	}()


	// if we are genesis return genesis block as base

	/* bl,err := chain.Load_BL_FROM_ID(blid)

	  if err != nil {
	   panic(fmt.Sprintf("Block NOT found %s", blid))
	  }
	  if len(bl.Tips) == 0 {
	       gbl := Generate_Genesis_Block()

	//      logger.Infof("Return genesis block as base")
	      return BlockScore{gbl.GetHash(),0}
	  }

	  bases := make([]BlockScore,len(bl.Tips),len(bl.Tips))
	  for i := range bl.Tips{
	      if chain.IsBlockSyncBlockHeight(bl.Tips[i]){
	        return BlockScore{bl.Tips[i], chain.Load_Height_for_BL_ID(bl.Tips[i])}
	      }
	       bases[i] = chain.FindTipBase(bl.Tips[i])
	  }*/

	// no past means blid is genesis: the genesis block is its own base
	tips := chain.Get_Block_Past(dbtx, blid)
	if len(tips) == 0 {
		gbl := Generate_Genesis_Block()
		bs = BlockScore{gbl.GetHash(), 0, nil}
		return
	}

	// any parent that is itself a sync block terminates the search; otherwise
	// recurse into each parent and keep the lowest base found
	bases := make([]BlockScore, len(tips), len(tips))
	for i := range tips {
		if chain.IsBlockSyncBlockHeightSpecific(dbtx, tips[i], chain_height) {
			rlog.Tracef(2, "SYNC block %s", tips[i])
			bs = BlockScore{tips[i], chain.Load_Height_for_BL_ID(dbtx, tips[i]), nil}
			return
		}
		bases[i] = chain.FindTipBase(dbtx, tips[i], chain_height)
	}

	sort_ascending_by_height(bases)

	//   logger.Infof("return BASE %s",bases[0])
	bs = bases[0]
	return bs
}
  2203  
  2204  // this will find the sum of  work done ( skipping any repetive nodes )
  2205  // all the information is privided in unique_map
  2206  func (chain *Blockchain) FindTipWorkScore_internal(dbtx storage.DBTX, unique_map map[crypto.Hash]*big.Int, blid crypto.Hash, base crypto.Hash, base_height int64) {
  2207  	/*bl,err := chain.Load_BL_FROM_ID(blid)
  2208  	  if err != nil {
  2209  	   panic(fmt.Sprintf("Block NOT found %s", blid))
  2210  	  }
  2211  
  2212  
  2213  
  2214  	  for i := range bl.Tips{
  2215  	      if _,ok := unique_map[bl.Tips[i]];!ok{
  2216  
  2217  	          ordered := chain.Is_Block_Topological_order(bl.Tips[i])
  2218  	          if !ordered ||
  2219  	              ordered && chain.Load_Block_Topological_order(bl.Tips[i]) >= chain.Load_Block_Topological_order(base){
  2220  	                 chain.FindTipWorkScore_internal(unique_map,bl.Tips[i],base,base_height) // recursively process any nodes
  2221  	              }
  2222  	      }
  2223  	  }*/
  2224  
  2225  	tips := chain.Get_Block_Past(dbtx, blid)
  2226  
  2227  	for i := range tips {
  2228  		if _, ok := unique_map[tips[i]]; !ok {
  2229  
  2230  			ordered := chain.Is_Block_Topological_order(dbtx, tips[i])
  2231  
  2232  			if !ordered {
  2233  				chain.FindTipWorkScore_internal(dbtx, unique_map, tips[i], base, base_height) // recursively process any nodes
  2234  				//logger.Infof("IBlock is not ordered %s", tips[i])
  2235  			} else if ordered && chain.Load_Block_Topological_order(dbtx, tips[i]) >= chain.Load_Block_Topological_order(dbtx, base) {
  2236  				chain.FindTipWorkScore_internal(dbtx, unique_map, tips[i], base, base_height) // recursively process any nodes
  2237  
  2238  				//logger.Infof("IBlock ordered %s %d %d", tips[i],chain.Load_Block_Topological_order(tips[i]), chain.Load_Block_Topological_order(base) )
  2239  			}
  2240  		}
  2241  	}
  2242  
  2243  	unique_map[blid] = chain.Load_Block_Difficulty(dbtx, blid)
  2244  
  2245  }
  2246  
// cachekey identifies a (tip, base, base height) triple; it is the lookup key
// for the work-score and full-order LRU caches (lrucache_workscore,
// lrucache_fullorder).
type cachekey struct {
	blid        crypto.Hash // the tip being scored/ordered
	base        crypto.Hash // the sync-block base used as reference
	base_height int64       // height of the base block
}
  2252  
// FindTipWorkScore finds the score of the tip in reference to a base
// (NOTE: base must always be a sync block, otherwise results will be wrong).
// It returns a map of every counted block to its difficulty contribution and
// the total score (the sum of all map values).
// Results are memoized per (blid, base, base_height) in lrucache_workscore.
func (chain *Blockchain) FindTipWorkScore(dbtx storage.DBTX, blid crypto.Hash, base crypto.Hash, base_height int64) (map[crypto.Hash]*big.Int, *big.Int) {

	// cache hit: copy the map shell and recompute the sum.
	// NOTE(review): the copy shares the cached *big.Int values, so callers
	// must treat the returned difficulties as read-only — verify no caller mutates them.
	//logger.Infof("BASE %s",base)
	if tmp_map_i, ok := chain.lrucache_workscore.Get(cachekey{blid, base, base_height}); ok {
		work_score := tmp_map_i.(map[crypto.Hash]*big.Int)

		map_copy := map[crypto.Hash]*big.Int{}
		score := new(big.Int).SetInt64(0)
		for k, v := range work_score {
			map_copy[k] = v
			score.Add(score, v)
		}
		return map_copy, score
	}

	// NOTE(review): this panic contradicts the file-header rule "NO Panics";
	// it fires only when the DB lookup for the tip block fails
	bl, err := chain.Load_BL_FROM_ID(dbtx, blid)
	if err != nil {
		panic(fmt.Sprintf("Block NOT found %s", blid))
	}
	unique_map := map[crypto.Hash]*big.Int{}

	// walk each tip of the block: unordered tips, and tips ordered at/after
	// the base's topological position, contribute their ancestry recursively
	for i := range bl.Tips {
		if _, ok := unique_map[bl.Tips[i]]; !ok {
			//if chain.Load_Height_for_BL_ID(bl.Tips[i]) >  base_height {
			//    chain.FindTipWorkScore_internal(unique_map,bl.Tips[i],base,base_height) // recursively process any nodes
			//}

			ordered := chain.Is_Block_Topological_order(dbtx, bl.Tips[i])
			if !ordered {
				chain.FindTipWorkScore_internal(dbtx, unique_map, bl.Tips[i], base, base_height) // recursively process any nodes
				//   logger.Infof("Block is not ordered %s", bl.Tips[i])
			} else if ordered && chain.Load_Block_Topological_order(dbtx, bl.Tips[i]) >= chain.Load_Block_Topological_order(dbtx, base) {
				chain.FindTipWorkScore_internal(dbtx, unique_map, bl.Tips[i], base, base_height) // recursively process any nodes

				// logger.Infof("Block ordered %s %d %d", bl.Tips[i],chain.Load_Block_Topological_order(bl.Tips[i]), chain.Load_Block_Topological_order(base) )
			}
		}
	}

	// the base itself contributes its full cumulative difficulty (unless the
	// tip IS the base, in which case only its own difficulty is counted below)
	if base != blid {
		unique_map[base] = chain.Load_Block_Cumulative_Difficulty(dbtx, base)
		// add base cumulative score
		// base_work := chain.Load_Block_Cumulative_Difficulty(base)
		// gbl:=Generate_Genesis_Block()
		// _, base_work  := chain.FindTipWorkScore(base, gbl.GetHash(),0)
		//unique_map[base]= base_work
		//unique_map[base] = new(big.Int).SetUint64(base_work)
	}

	/* if base_work == 0 {
	    logger.Infof("base Work done is zero %s", base)
	}*/

	// the tip's own difficulty (overwrites any recursive entry for blid)
	unique_map[blid] = chain.Load_Block_Difficulty(dbtx, blid)
	//unique_map[blid]= work_done

	//if work_done == 0 {
	//    logger.Infof("Work done is zero")
	//}

	// total score = sum of all counted difficulties
	score := new(big.Int).SetInt64(0)
	for _, v := range unique_map {
		score.Add(score, v)
	}

	// set in cache: save a copy of the map shell (values are shared, see above)
	{
		map_copy := map[crypto.Hash]*big.Int{}
		for k, v := range unique_map {
			map_copy[k] = v
		}
		chain.lrucache_workscore.Add(cachekey{blid, base, base_height}, map_copy)
	}

	return unique_map, score

}
  2331  
  2332  // this function finds a common base  which can be used to compare tips
  2333  // weight is replace by height
  2334  func (chain *Blockchain) find_common_base(dbtx storage.DBTX, tips []crypto.Hash) (base crypto.Hash, base_height int64) {
  2335  
  2336  
  2337  	scores := make([]BlockScore, len(tips), len(tips))
  2338  
  2339  	// var base crypto.Hash
  2340  	var best_height int64
  2341  	for i := range tips {
  2342  		tip_height := chain.Load_Height_for_BL_ID(dbtx, tips[i])
  2343  		if tip_height > best_height{
  2344  			best_height = tip_height
  2345  		}
  2346  	}
  2347  
  2348  
  2349  	for i := range tips {
  2350  		scores[i] = chain.FindTipBase(dbtx, tips[i],best_height) // we should chose the lowest weight
  2351  		scores[i].Height = chain.Load_Height_for_BL_ID(dbtx, scores[i].BLID)
  2352  	}
  2353  	// base is the lowest height
  2354  	sort_ascending_by_height(scores)
  2355  
  2356  	base = scores[0].BLID
  2357  	base_height = scores[0].Height
  2358  
  2359  	return
  2360  
  2361  }
  2362  
  2363  // this function finds a common base  which can be used to compare tips based on cumulative difficulty
  2364  func (chain *Blockchain) find_best_tip(dbtx storage.DBTX, tips []crypto.Hash, base crypto.Hash, base_height int64) (best crypto.Hash) {
  2365  
  2366  	tips_scores := make([]BlockScore, len(tips), len(tips))
  2367  
  2368  	for i := range tips {
  2369  		tips_scores[i].BLID = tips[i] // we should chose the lowest weight
  2370  		_, tips_scores[i].Cumulative_Difficulty = chain.FindTipWorkScore(dbtx, tips[i], base, base_height)
  2371  	}
  2372  
  2373  	sort_descending_by_cumulative_difficulty(tips_scores)
  2374  
  2375  	best = tips_scores[0].BLID
  2376  	//   base_height = scores[0].Weight
  2377  
  2378  	return best
  2379  
  2380  }
  2381  
  2382  func (chain *Blockchain) calculate_mainchain_distance_internal_recursive(dbtx storage.DBTX, unique_map map[crypto.Hash]int64, blid crypto.Hash) {
  2383  	tips := chain.Get_Block_Past(dbtx, blid)
  2384  	for i := range tips {
  2385  		ordered := chain.Is_Block_Topological_order(dbtx, tips[i])
  2386  		if ordered {
  2387  			unique_map[tips[i]] = chain.Load_Height_for_BL_ID(dbtx, tips[i])
  2388  		} else {
  2389  			chain.calculate_mainchain_distance_internal_recursive(dbtx, unique_map, tips[i]) // recursively process any nodes
  2390  		}
  2391  	}
  2392  	return
  2393  }
  2394  
  2395  // NOTE: some of the past may not be in the main chain  right now and need to be travelled recursively
  2396  // distance is number of hops to find a node, which is itself
  2397  func (chain *Blockchain) calculate_mainchain_distance(dbtx storage.DBTX, blid crypto.Hash) int64 {
  2398  
  2399  	unique_map := map[crypto.Hash]int64{}
  2400  	//tips := chain.Get_Block_Past(dbtx, blid)
  2401  
  2402  	//fmt.Printf("tips  %+v \n", tips)
  2403  
  2404  	// if the block is already in order, no need to look back
  2405  
  2406  	ordered := chain.Is_Block_Topological_order(dbtx, blid)
  2407  	if ordered {
  2408  		unique_map[blid] = chain.Load_Height_for_BL_ID(dbtx, blid)
  2409  	} else {
  2410  		chain.calculate_mainchain_distance_internal_recursive(dbtx, unique_map, blid)
  2411  	}
  2412  
  2413  	//for i := range tips {
  2414  	//}
  2415  
  2416  	//fmt.Printf("unique_map %+v \n", unique_map)
  2417  
  2418  	lowest_height := int64(0x7FFFFFFFFFFFFFFF) // max possible
  2419  	// now we need to find the lowest height
  2420  	for k, v := range unique_map {
  2421  		_ = k
  2422  		if lowest_height >= v {
  2423  			lowest_height = v
  2424  		}
  2425  	}
  2426  
  2427  	return int64(lowest_height)
  2428  }
  2429  
// Generate_Full_Order converts the DAG's partial order into a full (linear)
// order; this function is recursive.
// It should only be called with base blocks which satisfy sync-block
// properties, and is invoked on the maximum-weight tip at every tip change.
// Blocks are ordered recursively until a block already in the chain is found.
// Results are memoized per (blid, base, base_height) in lrucache_fullorder.
func (chain *Blockchain) Generate_Full_Order(dbtx storage.DBTX, blid crypto.Hash, base crypto.Hash, base_height int64, level int) (order_bucket []crypto.Hash) {

	// return a defensive copy from cache if possible
	if tmp_order, ok := chain.lrucache_fullorder.Get(cachekey{blid, base, base_height}); ok {
		order := tmp_order.([]crypto.Hash)
		order_bucket = make([]crypto.Hash, len(order), len(order))
		copy(order_bucket, order[0:])
		return
	}

	// NOTE(review): this panic contradicts the file-header rule "NO Panics";
	// it fires only when the DB lookup for a known block fails
	bl, err := chain.Load_BL_FROM_ID(dbtx, blid)
	if err != nil {
		panic(fmt.Sprintf("Block NOT found %s", blid))
	}

	// a block with no tips is genesis; the order is just the genesis hash
	if len(bl.Tips) == 0 {
		gbl := Generate_Genesis_Block()
		order_bucket = append(order_bucket, gbl.GetHash())
		return
	}

	// if we reached the base, stop the recursion and return it as the anchor
	//if chain.Is_Block_Topological_order(blid){
	if blid == base {
		order_bucket = append(order_bucket, blid)
		// logger.Infof("Generate order base reached  base %s", base)
		return
	}

	// we need to order previous tips first
	var tips_scores []BlockScore
	//tips_scores := make([]BlockScore,len(bl.Tips),len(bl.Tips))

	node_maps := map[crypto.Hash]map[crypto.Hash]*big.Int{} // retained from an earlier scoring approach; currently unused
	_ = node_maps
	for i := range bl.Tips {

		ordered := chain.Is_Block_Topological_order(dbtx, bl.Tips[i])

		if !ordered {
			// unordered tips are always candidates for ordering
			var score BlockScore
			score.BLID = bl.Tips[i]
			//node_maps[bl.Tips[i]], score.Weight = chain.FindTipWorkScore(bl.Tips[i],base,base_height)
			score.Cumulative_Difficulty = chain.Load_Block_Cumulative_Difficulty(dbtx, bl.Tips[i])

			tips_scores = append(tips_scores, score)

		} else if ordered && chain.Load_Block_Topological_order(dbtx, bl.Tips[i]) >= chain.Load_Block_Topological_order(dbtx, base) {
			// tips ordered at or after the base must be (re)ordered too

			//  logger.Infof("Generate order topo order wrt base %d %d", chain.Load_Block_Topological_order(dbtx,bl.Tips[i]), chain.Load_Block_Topological_order(dbtx,base))
			var score BlockScore
			score.BLID = bl.Tips[i]

			//score.Weight = chain.Load_Block_Cumulative_Difficulty(bl.Tips[i])
			score.Cumulative_Difficulty = chain.Load_Block_Cumulative_Difficulty(dbtx, bl.Tips[i])

			tips_scores = append(tips_scores, score)
		}

	}

	// heavier tips are ordered first
	sort_descending_by_cumulative_difficulty(tips_scores)

	// now we must add the nodes in the topographical order

	for i := range tips_scores {
		tmp_bucket := chain.Generate_Full_Order(dbtx, tips_scores[i].BLID, base, base_height, level+1)
		for j := range tmp_bucket {
			//only process if  this block is unsettled
			//if !chain.IsBlockSettled(tmp_bucket[j]) {
			// if order is already decided, do not order it again
			// NOTE(review): sliceExists is a linear scan, making this loop
			// quadratic in the bucket size — acceptable only for small buckets
			if !sliceExists(order_bucket, tmp_bucket[j]) {
				order_bucket = append(order_bucket, tmp_bucket[j])
			}
			//}
		}
	}
	// add self to the end, since all past nodes have been ordered
	order_bucket = append(order_bucket, blid)

	//  logger.Infof("Generate Order %s %+v  %+v", blid , order_bucket, tips_scores)

	//set in cache, save a copy in cache
	{
		order_copy := make([]crypto.Hash, len(order_bucket), len(order_bucket))
		copy(order_copy, order_bucket[0:])

		chain.lrucache_fullorder.Add(cachekey{blid, base, base_height}, order_copy)
	}

	if level == 0 {
		//logger.Warnf("generating full order for block %s %d", blid, level)
		/*
		   for i := range order_bucket{
		            logger.Infof("%2d  %s", i, order_bucket[i])
		   }
		*/
		//logger.Warnf("generating full order finished")
	}
	return
}
  2535  
  2536  // this function finds a block at specific height whether it is a sync block
  2537  // if yes we generate a full order and settle the chain upto that level
  2538  /*
  2539  func (chain *Blockchain) SettleChainAtHeight(height uint64) {
  2540      blocks := chain.Get_Blocks_At_Height(height)
  2541       for i := range blocks {
  2542           if !chain.IsBlockSettled(blocks[i]) && chain.IsBlockSyncBlock(blocks[i]){ // only unsettled blocks must be settled
  2543               order := chain.Generate_Full_Order(blocks[i],0)
  2544               logger.Warnf("Chain hash been settled at height %d order %+v", height,order)
  2545           }
  2546  
  2547       }
  2548  
  2549  
  2550  
  2551  
  2552  }
  2553  */
// node_map is package-level scratch state used by collect_nodes/writenode to
// accumulate the set of blocks reachable from a starting block.
// NOTE(review): it is neither mutex-protected nor cleared between runs, so the
// graph-dump helpers below are not safe for concurrent or repeated use.
var node_map = map[crypto.Hash]bool{}
  2555  
  2556  func collect_nodes(chain *Blockchain, dbtx storage.DBTX, blid crypto.Hash) {
  2557  	future := chain.Get_Block_Future(dbtx, blid)
  2558  	for i := range future {
  2559  		//node_map[future[i]]=true
  2560  
  2561  		if _, ok := node_map[future[i]]; !ok {
  2562  			collect_nodes(chain, dbtx, future[i]) // recursive add node
  2563  		}
  2564  	}
  2565  
  2566  	node_map[blid] = true
  2567  
  2568  }
  2569  
  2570  func writenode(chain *Blockchain, dbtx storage.DBTX, w *bufio.Writer, blid crypto.Hash) { // process a node, recursively
  2571  
  2572  	collect_nodes(chain, dbtx, blid)
  2573  
  2574  	sync_blocks := map[crypto.Hash]uint64{}
  2575  
  2576  	for k, _ := range node_map {
  2577  		if chain.IsBlockSyncBlockHeight(dbtx, k) {
  2578  			// sync_blocks = append(sync_blocks,
  2579  			sync_blocks[k] = uint64(chain.Load_Height_for_BL_ID(dbtx, k))
  2580  		}
  2581  	}
  2582  
  2583  	w.WriteString(fmt.Sprintf("node [ fontsize=12 style=filled ]\n{\n"))
  2584  	for k := range node_map {
  2585  
  2586  		//anticone := chain.Get_AntiCone_Unsettled(k)
  2587  
  2588  		color := "white"
  2589  
  2590  		if chain.IsBlockSyncBlockHeight(dbtx, k) {
  2591  			color = "green"
  2592  		}
  2593  
  2594  		/*
  2595  		   logger.Infof("Scores for %s",k)
  2596  
  2597  		   height := chain.Load_Height_for_BL_ID(k)
  2598  
  2599  		   for base,base_height := range  sync_blocks {
  2600  		       if height > base_height {
  2601  		           work_data, work_score := chain.FindTipWorkScore(k,base,base_height)
  2602  		        logger.Infof("Scores base %s height %5d  base_height %5d work %d  fff",base,height, base_height,work_score)
  2603  
  2604  		        _ = work_data
  2605  		        for k1,v1 := range work_data{
  2606  		            _ = k1
  2607  		            _ = v1
  2608  		           // logger.Infof("score consists of %s %d  topo index %d",k1,v1, chain.Load_Block_Topological_order(k1))
  2609  		        }
  2610  
  2611  		        full_order := chain.Generate_Full_Order(k, base, base_height,0)
  2612  		        for j := range full_order {
  2613  		            _ = j
  2614  		        // logger.Infof("full order %d %s", j, full_order[j])
  2615  		        }
  2616  
  2617  		       }
  2618  		   }
  2619  		*/
  2620  
  2621  		/*
  2622  		   if  len(anticone) >=4{
  2623  		       color = "red"
  2624  		   }
  2625  
  2626  		   if chain.IsBlockSyncBlock(k) &&  len(anticone) >=4{
  2627  		       color = "orange"
  2628  		   }
  2629  		*/
  2630  
  2631  		/*gbl := Generate_Genesis_Block()
  2632  		gbl_map, cumulative_difficulty := chain.FindTipWorkScore(dbtx, k, gbl.GetHash(), 0)
  2633  
  2634  		if cumulative_difficulty.Cmp(chain.Load_Block_Cumulative_Difficulty(dbtx, k)) != 0 {
  2635  			logger.Infof("workmap from genesis  MISMATCH  blid %s height %d", k, chain.Load_Height_for_BL_ID(dbtx, k))
  2636  			for k, v := range gbl_map {
  2637  				logger.Infof("%s %d", k, v)
  2638  			}
  2639  
  2640  		}*/
  2641  
  2642  		//w.WriteString(fmt.Sprintf("L%s  [ fillcolor=%s label = \"%s %d height %d score %d stored %d order %d\"  ];\n", k.String(), color, k.String(), 0, chain.Load_Height_for_BL_ID(dbtx, k), cumulative_difficulty, chain.Load_Block_Cumulative_Difficulty(dbtx, k), chain.Load_Block_Topological_order(dbtx, k)))
  2643  		w.WriteString(fmt.Sprintf("L%s  [ fillcolor=%s label = \"%s %d height %d score %d stored %d order %d\"  ];\n", k.String(), color, k.String(), 0, chain.Load_Height_for_BL_ID(dbtx, k), 0, chain.Load_Block_Cumulative_Difficulty(dbtx, k), chain.Load_Block_Topological_order(dbtx, k)))
  2644  	}
  2645  	w.WriteString(fmt.Sprintf("}\n"))
  2646  
  2647  	// now dump the interconnections
  2648  	for k := range node_map {
  2649  		future := chain.Get_Block_Future(dbtx, k)
  2650  		for i := range future {
  2651  			w.WriteString(fmt.Sprintf("L%s -> L%s ;\n", k.String(), future[i].String()))
  2652  		}
  2653  
  2654  	}
  2655  }
  2656  
  2657  func WriteBlockChainTree(chain *Blockchain, filename string) (err error) {
  2658  
  2659  	dbtx, err := chain.store.BeginTX(false)
  2660  	if err != nil {
  2661  		logger.Warnf("Could NOT add block to chain. Error opening writable TX, err %s", err)
  2662  		return
  2663  	}
  2664  
  2665  	defer dbtx.Rollback()
  2666  
  2667  	f, err := os.Create(filename)
  2668  	if err != nil {
  2669  		return
  2670  	}
  2671  	defer f.Close()
  2672  
  2673  	w := bufio.NewWriter(f)
  2674  	defer w.Flush()
  2675  	w.WriteString("digraph dero_blockchain_graph { \n")
  2676  
  2677  	blid, err := chain.Load_Block_Topological_order_at_index(nil, 158800)
  2678  	if err != nil {
  2679  		logger.Warnf("Cannot get block  at topoheight %d err: %s", 158800, err)
  2680  		return
  2681  	}
  2682  
  2683  	writenode(chain, dbtx, w, blid)
  2684  	/*g := Generate_Genesis_Block()
  2685  	writenode(chain, dbtx, w, g.GetHash())
  2686  	*/
  2687  	w.WriteString("}\n")
  2688  
  2689  	return
  2690  }