github.com/piotrnar/gocoin@v0.0.0-20240512203912-faa0448c5e96/lib/chain/blockdb.go (about)

     1  package chain
     2  
     3  import (
     4  	"bufio"
     5  	"bytes"
     6  	"compress/gzip"
     7  	"encoding/binary"
     8  	"errors"
     9  	"fmt"
    10  	"io"
    11  	"io/ioutil"
    12  	"os"
    13  	"sync"
    14  	"time"
    15  
    16  	"github.com/piotrnar/gocoin/lib/btc"
    17  	"github.com/piotrnar/gocoin/lib/others/snappy"
    18  )
    19  
const (
	// Flag bits stored in byte [0] of each blockchain.new record.
	BLOCK_TRUSTED = 0x01 // this block's scripts have been verified
	BLOCK_INVALID = 0x02 // this block's scripts have failed verification
	BLOCK_COMPRSD = 0x04 // this block's data is stored compressed
	BLOCK_SNAPPED = 0x08 // compression is snappy (gzip otherwise)
	BLOCK_LENGTH  = 0x10 // bytes [32:36] of the record carry the uncompressed length
	BLOCK_INDEX   = 0x20 // bytes [28:32] of the record carry the data file index

	MAX_BLOCKS_TO_WRITE = 1024             // flush the data to disk when exceeding
	MAX_DATA_WRITE      = 16 * 1024 * 1024 // ... or when this many bytes are pending
)
    31  
    32  /*
    33  	blockchain.dat - contains raw blocks data, no headers, nothing
    34  	blockchain.new - contains records of 136 bytes (all values LSB):
    35  		[0] - flags:
    36  			bit(0) - "trusted" flag - this block's scripts have been verified
    37  			bit(1) - "invalid" flag - this block's scripts have failed
    38  			bit(2) - "compressed" flag - this block's data is compressed
    39  			bit(3) - "snappy" flag - this block is compressed with snappy (not gzip'ed)
    40  			bit(4) - if this bit is set, bytes [32:36] carry length of uncompressed block
    41  			bit(5) - if this bit is set, bytes [28:32] carry data file index
    42  
    43  		Used to be:
    44  		[4:36]  - 256-bit block hash - DEPRECATED! (hash the header to get the value)
    45  
    46  		[4:28] - reserved
    47  		[28:32] - specifies which blockchain.dat file is used (if not zero, the filename is: blockchain-%08x.dat)
    48  		[32:36] - length of uncompressed block
    49  
    50  		[36:40] - 32-bit block height (genesis is 0)
    51  		[40:48] - 64-bit block pos in blockchain.dat file
		[48:52] - 32-bit block length in bytes
		[52:56] - 32-bit number of transactions in the block
    54  		[56:136] - 80 bytes blocks header
    55  */
    56  
// oneBl is the in-memory index record of a single stored block.
type oneBl struct {
	fpos uint64 // where at the block is stored in blockchain.dat
	ipos int64  // where at the record is stored in blockchain.idx (used to set flags) / -1 if not stored in the file (yet)
	blen uint32 // how long the block is in blockchain.dat
	olen uint32 // original length of the block (before compression)

	datfileidx uint32 // use different blockchain.dat (if not zero, the filename is: blockchain-%08x.dat)

	trusted    bool // scripts have been verified (BLOCK_TRUSTED)
	compressed bool // data on disk is compressed (BLOCK_COMPRSD)
	snappied   bool // compressed with snappy, not gzip (BLOCK_SNAPPED)
}
    69  
// BlckCachRec is a single entry of the in-memory block cache.
type BlckCachRec struct {
	Data       []byte // raw (uncompressed) block bytes
	*btc.Block        // parsed block, if available (may be nil)

	// This is for BIP152
	BIP152 []byte // 8 bytes of nonce || 8 bytes of K0 LSB || 8 bytes of K1 LSB

	LastUsed time.Time // for LRU eviction from the cache
}
    79  
// BlockDBOpts carries the configuration for NewBlockDBExt.
type BlockDBOpts struct {
	MaxCachedBlocks int    // how many blocks to keep in the memory cache (0 -> default of 100)
	MaxDataFileSize uint64 // start a new data file when the current one exceeds this size (0 -> single file)
	DataFilesKeep   uint32 // how many old data files to keep around (0 -> keep all)
	DataFilesBackup bool   // move expired data files to the "oldat" subfolder instead of deleting them
	CompressOnDisk  bool   // compress (snappy) block data before writing it to disk
}
    87  
// oneB2W is a single block queued (via BlockDB.blocksToWrite) to be written to disk.
type oneB2W struct {
	idx     [btc.Uint256IdxLen]byte // key into BlockDB.blockIndex
	h       [32]byte                // full block hash
	data    []byte                  // raw block bytes
	height  uint32                  // block height (genesis is 0)
	txcount uint32                  // number of transactions in the block
}
    95  
// BlockDB stores raw block data on disk (blockchain*.dat + blockchain.new)
// and keeps an in-memory index plus an LRU cache of recently used blocks.
type BlockDB struct {
	dirname            string
	blockIndex         map[[btc.Uint256IdxLen]byte]*oneBl
	blockdata          *os.File // current blockchain*.dat file (opened for appending)
	blockindx          *os.File // blockchain.new index file
	mutex, disk_access sync.Mutex
	max_cached_blocks  int
	cache              map[[btc.Uint256IdxLen]byte]*BlckCachRec

	maxidxfilepos, maxdatfilepos int64  // current write positions within the index / data file
	maxdatfileidx                uint32 // index of the current (newest) data file
	do_not_compress              bool

	blocksToWrite chan oneB2W // blocks accepted but not yet written to disk
	datToWrite    uint64      // total size (in bytes) of the queued blocks

	max_data_file_size uint64
	data_files_keep    uint32
	data_files_backup  bool
	data_files_done    sync.WaitGroup // tracks background removal/backup of old data files
}
   117  
   118  func NewBlockDBExt(dir string, opts *BlockDBOpts) (db *BlockDB) {
   119  	db = new(BlockDB)
   120  	db.dirname = dir
   121  	if db.dirname != "" && db.dirname[len(db.dirname)-1] != '/' && db.dirname[len(db.dirname)-1] != '\\' {
   122  		db.dirname += "/"
   123  	}
   124  	db.blockIndex = make(map[[btc.Uint256IdxLen]byte]*oneBl)
   125  	os.MkdirAll(db.dirname, 0770)
   126  
   127  	db.blockindx, _ = os.OpenFile(db.dirname+"blockchain.new", os.O_RDWR|os.O_CREATE, 0660)
   128  	if db.blockindx == nil {
   129  		panic("Cannot open blockchain.new")
   130  	}
   131  
   132  	if opts != nil {
   133  		if opts.MaxCachedBlocks > 0 {
   134  			db.max_cached_blocks = opts.MaxCachedBlocks
   135  		}
   136  		db.max_data_file_size = opts.MaxDataFileSize
   137  		db.data_files_keep = opts.DataFilesKeep
   138  		db.data_files_backup = opts.DataFilesBackup
   139  		db.do_not_compress = !opts.CompressOnDisk
   140  	}
   141  
   142  	if db.max_cached_blocks == 0 {
   143  		db.max_cached_blocks = 100 // default
   144  	}
   145  	db.cache = make(map[[btc.Uint256IdxLen]byte]*BlckCachRec, db.max_cached_blocks)
   146  
   147  	db.blocksToWrite = make(chan oneB2W, MAX_BLOCKS_TO_WRITE)
   148  	return
   149  }
   150  
   151  func NewBlockDB(dir string) (db *BlockDB) {
   152  	return NewBlockDBExt(dir, &BlockDBOpts{MaxCachedBlocks: 500})
   153  }
   154  
   155  // Make sure to call with the mutex locked
   156  func (db *BlockDB) addToCache(h *btc.Uint256, bl []byte, str *btc.Block) (crec *BlckCachRec) {
   157  	if db.cache == nil {
   158  		return
   159  	}
   160  	crec = db.cache[h.BIdx()]
   161  	if crec != nil {
   162  		crec.LastUsed = time.Now()
   163  		return
   164  	}
   165  	for len(db.cache) >= db.max_cached_blocks {
   166  		var oldest_t time.Time
   167  		var oldest_k [btc.Uint256IdxLen]byte
   168  		for k, v := range db.cache {
   169  			if oldest_t.IsZero() || v.LastUsed.Before(oldest_t) {
   170  				if rec := db.blockIndex[k]; rec.ipos != -1 {
   171  					// don't expire records that have not been writen to disk yet
   172  					oldest_t = v.LastUsed
   173  					oldest_k = k
   174  				}
   175  			}
   176  		}
   177  		if oldest_t.IsZero() {
   178  			break
   179  		} else {
   180  			delete(db.cache, oldest_k)
   181  		}
   182  	}
   183  	crec = &BlckCachRec{LastUsed: time.Now(), Data: bl, Block: str}
   184  	db.cache[h.BIdx()] = crec
   185  	return
   186  }
   187  
   188  func (db *BlockDB) GetStats() (s string) {
   189  	db.mutex.Lock()
   190  	s += fmt.Sprintf("BlockDB: %d blocks, %d/%d in cache.  ToWriteCnt:%d (%dKB)\n",
   191  		len(db.blockIndex), len(db.cache), db.max_cached_blocks, len(db.blocksToWrite), db.datToWrite>>10)
   192  	db.mutex.Unlock()
   193  	return
   194  }
   195  
   196  func hash2idx(h []byte) (idx [btc.Uint256IdxLen]byte) {
   197  	copy(idx[:], h[:btc.Uint256IdxLen])
   198  	return
   199  }
   200  
// BlockAdd adds a new block at the given height to the database.
// The block data is queued to be written to disk later (and kept in the
// memory cache until then). If the block is already known, only its
// "trusted" flag may get updated.
func (db *BlockDB) BlockAdd(height uint32, bl *btc.Block) (e error) {
	var trust_it bool
	var flush bool

	db.mutex.Lock()
	idx := bl.Hash.BIdx()
	if rec, ok := db.blockIndex[idx]; !ok {
		// a new block: index it, cache it and queue it for writing
		db.blockIndex[idx] = &oneBl{ipos: -1, trusted: bl.Trusted.Get()}
		db.addToCache(bl.Hash, bl.Raw, bl)
		db.datToWrite += uint64(len(bl.Raw))
		db.blocksToWrite <- oneB2W{idx: idx, h: bl.Hash.Hash, data: bl.Raw, height: height, txcount: uint32(bl.TxCount)}
		// flush when too many blocks or too many bytes are pending
		flush = len(db.blocksToWrite) >= MAX_BLOCKS_TO_WRITE || db.datToWrite >= MAX_DATA_WRITE
	} else {
		//println("Block", bl.Hash.String(), "already in", rec.trusted, bl.Trusted)
		if !rec.trusted && bl.Trusted.Get() {
			//println(" ... but now it's getting trusted")
			if rec.ipos == -1 {
				// It's not saved yet - just change the flag
				rec.trusted = true
			} else {
				// already stored on disk - update its record outside the mutex
				trust_it = true
			}
		}
	}
	db.mutex.Unlock()

	if trust_it {
		//println(" ... in the slow mode")
		db.BlockTrusted(bl.Hash.Hash[:])
	}

	if flush {
		//println("Too many blocksToWrite - flush the data...")
		if !db.writeAll() {
			panic("many to write but nothing stored")
		}
		//println("flush done")
	}

	return
}
   242  
   243  func (db *BlockDB) writeAll() (sync bool) {
   244  	//sta := time.Now()
   245  	for db.writeOne() {
   246  		sync = true
   247  	}
   248  	if sync {
   249  		db.blockdata.Sync()
   250  		db.blockindx.Sync()
   251  		//println("Block(s) saved in", time.Now().Sub(sta).String())
   252  	}
   253  	return
   254  }
   255  
// removeDatFile deletes (or, with data_files_backup set, moves into the
// "oldat" subfolder) the data file with the given index.
// It signals completion through the data_files_done wait group, so the
// caller must call db.data_files_done.Add(1) before invoking it.
func (db *BlockDB) removeDatFile(idx uint32) {
	var remove bool
	dat_file := db.dat_fname(idx, false)
	if db.data_files_backup {
		os.Mkdir(db.dirname+"oldat", 0770)
		bak_file := db.dat_fname(idx, true)
		if er := os.Rename(dat_file, bak_file); er != nil {
			// if we try to move the file across different file systems, it will end up here
			if df, er := os.Open(dat_file); er == nil {
				if bf, er := os.Create(bak_file); er == nil {
					if _, er := io.Copy(bf, df); er == nil {
						// copied fine - the original can be removed
						remove = true
					} else {
						println("blockdb.RDF-A:", er.Error())
					}
					bf.Close()
				} else {
					println("blockdb.RDF-B:", er.Error())
				}
				df.Close()
			} else {
				println("blockdb.RDF-C:", er.Error())
			}
		}
	} else {
		remove = true
	}

	if remove {
		if er := os.Remove(dat_file); er != nil {
			if er := os.Rename(dat_file, dat_file+".tmp"); er == nil {
				// It was probably open by GetBlock()
				// Rename it, wait one second and try again...
				time.Sleep(1e9)
				if er := os.Remove(dat_file + ".tmp"); er != nil {
					println("failed to remove", dat_file+".tmp", "because", er.Error())
				}
			} else {
				println("failed to remove", dat_file, "because", er.Error())
			}
		}
	}

	db.data_files_done.Done()
}
   301  
   302  func (db *BlockDB) writeOne() (written bool) {
   303  	var fl [136]byte
   304  	var rec *oneBl
   305  	var b2w oneB2W
   306  	var e error
   307  
   308  	select {
   309  	case b2w = <-db.blocksToWrite:
   310  
   311  	default:
   312  		return
   313  	}
   314  
   315  	db.mutex.Lock()
   316  	db.datToWrite -= uint64(len(b2w.data))
   317  	rec = db.blockIndex[b2w.idx]
   318  	db.mutex.Unlock()
   319  
   320  	if rec == nil || rec.ipos != -1 {
   321  		println("Block not in the index anymore - discard")
   322  		written = true
   323  		return
   324  	}
   325  
   326  	db.disk_access.Lock()
   327  
   328  	var cbts []byte
   329  	if db.do_not_compress {
   330  		rec.compressed, rec.snappied = false, false
   331  		cbts = b2w.data
   332  	} else {
   333  		rec.compressed, rec.snappied = true, true
   334  		cbts = snappy.Encode(nil, b2w.data)
   335  	}
   336  	rec.blen = uint32(len(cbts))
   337  	rec.ipos = db.maxidxfilepos
   338  
   339  	if db.max_data_file_size != 0 && uint64(db.maxdatfilepos)+uint64(len(cbts)) > db.max_data_file_size {
   340  		if tmpf, _ := os.Create(db.dat_fname(db.maxdatfileidx+1, false)); tmpf != nil {
   341  			db.blockdata.Close()
   342  			db.blockdata = tmpf
   343  			db.maxdatfilepos = 0
   344  			if db.data_files_keep != 0 && db.maxdatfileidx >= db.data_files_keep {
   345  				db.data_files_done.Add(1)
   346  				go db.removeDatFile(db.maxdatfileidx - db.data_files_keep)
   347  			}
   348  			db.maxdatfileidx++
   349  		} else {
   350  			println("Cannot create", db.dat_fname(db.maxdatfileidx, false))
   351  		}
   352  	}
   353  
   354  	rec.datfileidx = db.maxdatfileidx
   355  	rec.fpos = uint64(db.maxdatfilepos)
   356  	if rec.compressed {
   357  		fl[0] |= BLOCK_COMPRSD
   358  	}
   359  	if rec.snappied {
   360  		fl[0] |= BLOCK_SNAPPED
   361  	}
   362  	if rec.trusted {
   363  		fl[0] |= BLOCK_TRUSTED
   364  	}
   365  
   366  	//copy(fl[4:32], b2w.h[:28])
   367  	fl[0] |= BLOCK_LENGTH | BLOCK_INDEX
   368  	binary.LittleEndian.PutUint32(fl[32:36], uint32(len(b2w.data)))
   369  	binary.LittleEndian.PutUint32(fl[28:32], rec.datfileidx)
   370  
   371  	binary.LittleEndian.PutUint32(fl[36:40], uint32(b2w.height))
   372  	binary.LittleEndian.PutUint64(fl[40:48], uint64(rec.fpos))
   373  	binary.LittleEndian.PutUint32(fl[48:52], uint32(rec.blen))
   374  	binary.LittleEndian.PutUint32(fl[52:56], uint32(b2w.txcount))
   375  	copy(fl[56:136], b2w.data[:80])
   376  
   377  	if _, e = db.blockdata.Write(cbts); e != nil {
   378  		panic(e.Error())
   379  	}
   380  
   381  	if _, e = db.blockindx.Write(fl[:]); e != nil {
   382  		panic(e.Error())
   383  	}
   384  
   385  	db.maxidxfilepos += 136
   386  	db.maxdatfilepos += int64(rec.blen)
   387  
   388  	db.disk_access.Unlock()
   389  
   390  	written = true
   391  
   392  	return
   393  }
   394  
   395  func (db *BlockDB) BlockInvalid(hash []byte) {
   396  	idx := btc.NewUint256(hash).BIdx()
   397  	db.mutex.Lock()
   398  	cur, ok := db.blockIndex[idx]
   399  	if !ok {
   400  		db.mutex.Unlock()
   401  		println("BlockInvalid: no such block", btc.NewUint256(hash).String())
   402  		return
   403  	}
   404  	if cur.trusted {
   405  		println("Looks like your UTXO database is corrupt")
   406  		println("To rebuild it, remove folder: " + db.dirname + "unspent4")
   407  		panic("Trusted block cannot be invalid")
   408  	}
   409  	//println("mark", btc.NewUint256(hash).String(), "as invalid")
   410  	if cur.ipos == -1 {
   411  		// if not written yet, then never write it
   412  		delete(db.cache, idx)
   413  		delete(db.blockIndex, idx)
   414  	} else {
   415  		// write the new flag to disk
   416  		db.setBlockFlag(cur, BLOCK_INVALID)
   417  	}
   418  	db.mutex.Unlock()
   419  }
   420  
   421  func (db *BlockDB) BlockTrusted(hash []byte) {
   422  	idx := btc.NewUint256(hash).BIdx()
   423  	db.mutex.Lock()
   424  	cur, ok := db.blockIndex[idx]
   425  	if !ok {
   426  		db.mutex.Unlock()
   427  		println("BlockTrusted: no such block")
   428  		return
   429  	}
   430  	if !cur.trusted {
   431  		//fmt.Println("mark", btc.NewUint256(hash).String(), "as trusted")
   432  		db.setBlockFlag(cur, BLOCK_TRUSTED)
   433  	}
   434  	db.mutex.Unlock()
   435  }
   436  
   437  func (db *BlockDB) setBlockFlag(cur *oneBl, fl byte) {
   438  	var b [1]byte
   439  	cur.trusted = true
   440  	db.disk_access.Lock()
   441  	cpos, _ := db.blockindx.Seek(0, os.SEEK_CUR) // remember our position
   442  	db.blockindx.ReadAt(b[:], cur.ipos)
   443  	b[0] |= fl
   444  	db.blockindx.WriteAt(b[:], cur.ipos)
   445  	db.blockindx.Seek(cpos, os.SEEK_SET) // restore the end posistion
   446  	db.disk_access.Unlock()
   447  }
   448  
   449  func (db *BlockDB) Idle() {
   450  	if db.writeAll() {
   451  		//println(" * block(s) stored from idle")
   452  	}
   453  }
   454  
   455  func (db *BlockDB) Close() {
   456  	if db.writeAll() {
   457  		//println(" * block(s) stored from close")
   458  	}
   459  	db.data_files_done.Wait()
   460  	db.blockdata.Close()
   461  	db.blockindx.Close()
   462  }
   463  
   464  func (db *BlockDB) BlockGetInternal(hash *btc.Uint256, do_not_cache bool) (cacherec *BlckCachRec, trusted bool, e error) {
   465  	db.mutex.Lock()
   466  	rec, ok := db.blockIndex[hash.BIdx()]
   467  	if !ok {
   468  		db.mutex.Unlock()
   469  		e = errors.New("Block not in the index")
   470  		return
   471  	}
   472  
   473  	trusted = rec.trusted
   474  	if db.cache != nil {
   475  		if crec, hit := db.cache[hash.BIdx()]; hit {
   476  			cacherec = crec
   477  			crec.LastUsed = time.Now()
   478  			db.mutex.Unlock()
   479  			return
   480  		}
   481  	}
   482  	db.mutex.Unlock()
   483  
   484  	if rec.ipos == -1 {
   485  		e = errors.New("Block not written yet and not in the cache")
   486  		return
   487  	}
   488  
   489  	if rec.blen == 0 {
   490  		e = errors.New("Block purged from disk")
   491  		return
   492  	}
   493  
   494  	bl := make([]byte, rec.blen)
   495  
   496  	db.disk_access.Lock()
   497  
   498  	var f *os.File
   499  	// we will re-open the data file, to not spoil the writting pointer
   500  	f, e = os.Open(db.dat_fname(rec.datfileidx, false))
   501  	if f == nil || e != nil {
   502  		f, e = os.Open(db.dat_fname(rec.datfileidx, true))
   503  		if f == nil || e != nil {
   504  			db.disk_access.Unlock()
   505  			return
   506  		}
   507  	}
   508  
   509  	_, e = f.Seek(int64(rec.fpos), os.SEEK_SET)
   510  	if e != nil {
   511  		f.Close()
   512  		db.disk_access.Unlock()
   513  		return
   514  	}
   515  
   516  	_, e = io.ReadFull(f, bl)
   517  	f.Close()
   518  	db.disk_access.Unlock()
   519  
   520  	if e != nil {
   521  		return
   522  	}
   523  
   524  	if rec.compressed {
   525  		if rec.snappied {
   526  			bl, _ = snappy.Decode(nil, bl)
   527  			if bl == nil {
   528  				e = errors.New("snappy.Decode() failed")
   529  			}
   530  		} else {
   531  			gz, _ := gzip.NewReader(bytes.NewReader(bl))
   532  			bl, _ = ioutil.ReadAll(gz)
   533  			gz.Close()
   534  		}
   535  	}
   536  
   537  	if rec.olen == 0 {
   538  		rec.olen = uint32(len(bl))
   539  	}
   540  
   541  	if !do_not_cache {
   542  		db.mutex.Lock()
   543  		cacherec = db.addToCache(hash, bl, nil)
   544  		db.mutex.Unlock()
   545  	} else {
   546  		cacherec = &BlckCachRec{Data: bl}
   547  	}
   548  
   549  	return
   550  }
   551  
// BlockGetExt returns the cache record of the block with the given hash,
// fetching it from disk (and caching it) if necessary.
// It also returns whether the block is marked as trusted.
func (db *BlockDB) BlockGetExt(hash *btc.Uint256) (cacherec *BlckCachRec, trusted bool, e error) {
	return db.BlockGetInternal(hash, false)
}
   555  
   556  func (db *BlockDB) BlockGet(hash *btc.Uint256) (bl []byte, trusted bool, e error) {
   557  	var rec *BlckCachRec
   558  	rec, trusted, e = db.BlockGetInternal(hash, false)
   559  	if rec != nil {
   560  		bl = rec.Data
   561  	}
   562  	return
   563  }
   564  
   565  func (db *BlockDB) BlockLength(hash *btc.Uint256, decode_if_needed bool) (length uint32, e error) {
   566  	db.mutex.Lock()
   567  	rec, ok := db.blockIndex[hash.BIdx()]
   568  	if !ok {
   569  		db.mutex.Unlock()
   570  		e = errors.New("Block not in the index")
   571  		return
   572  	}
   573  	db.mutex.Unlock()
   574  
   575  	if rec.olen != 0 {
   576  		length = rec.olen
   577  		return
   578  	}
   579  
   580  	if !rec.compressed || !decode_if_needed {
   581  		length = rec.blen
   582  		return
   583  	}
   584  
   585  	_, _, e = db.BlockGet(hash)
   586  	if e == nil {
   587  		length = rec.olen
   588  	}
   589  
   590  	return
   591  }
   592  
   593  func (db *BlockDB) dat_fname(idx uint32, archive bool) (fn string) {
   594  	dir := db.dirname
   595  	if archive {
   596  		dir += "oldat" + string(os.PathSeparator)
   597  	}
   598  	if idx == 0 {
   599  		fn = dir + "blockchain.dat"
   600  	} else {
   601  		fn = dir + fmt.Sprintf("blockchain-%08x.dat", idx)
   602  	}
   603  	if _, er := os.Stat(fn); er != nil {
   604  		fn = dir + fmt.Sprintf("bl%08d.dat", idx)
   605  	}
   606  	return
   607  }
   608  
// LoadBlockIndex reads the entire blockchain.new file, rebuilding the
// in-memory block index and calling the walk function for each valid
// record found. It then opens the newest data file for appending and
// removes (or backs up) expired old data files.
func (db *BlockDB) LoadBlockIndex(ch *Chain, walk func(ch *Chain, hash, hdr []byte, height, blen, txs uint32)) (e error) {
	var b [136]byte
	var bh, txs uint32
	db.blockindx.Seek(0, os.SEEK_SET)
	db.maxidxfilepos = 0
	rd := bufio.NewReader(db.blockindx)
	for !AbortNow {
		if _, e := io.ReadFull(rd, b[:]); e != nil {
			// EOF (or a truncated last record)
			break
		}

		bh = binary.LittleEndian.Uint32(b[36:40])
		// the block hash is derived from the stored 80-byte header
		BlockHash := btc.NewSha2Hash(b[56:136])

		if (b[0] & BLOCK_INVALID) != 0 {
			// just ignore it
			fmt.Println("BlockDB: Block", binary.LittleEndian.Uint32(b[36:40]), BlockHash.String(), "is invalid")
			continue
		}

		ob := new(oneBl)
		ob.trusted = (b[0] & BLOCK_TRUSTED) != 0
		ob.compressed = (b[0] & BLOCK_COMPRSD) != 0
		ob.snappied = (b[0] & BLOCK_SNAPPED) != 0
		ob.fpos = binary.LittleEndian.Uint64(b[40:48])
		blen := binary.LittleEndian.Uint32(b[48:52])
		ob.blen = blen
		if (b[0] & BLOCK_LENGTH) != 0 {
			// the record also carries the uncompressed length
			blen = binary.LittleEndian.Uint32(b[32:36])
			ob.olen = blen
		}
		if (b[0] & BLOCK_INDEX) != 0 {
			// the record also carries the data file index
			ob.datfileidx = binary.LittleEndian.Uint32(b[28:32])
		}
		if blen > 0 && ob.datfileidx != 0xffffffff && ob.datfileidx > db.maxdatfileidx {
			// entered a newer data file - restart its end-of-file position
			db.maxdatfileidx = ob.datfileidx
			db.maxdatfilepos = 0
		}
		txs = binary.LittleEndian.Uint32(b[52:56])
		ob.ipos = db.maxidxfilepos

		db.blockIndex[BlockHash.BIdx()] = ob

		// track the highest end-of-data offset within the current data file
		if int64(ob.fpos)+int64(ob.blen) > db.maxdatfilepos {
			db.maxdatfilepos = int64(ob.fpos) + int64(ob.blen)
		}

		walk(ch, BlockHash.Hash[:], b[56:136], bh, blen, txs)
		db.maxidxfilepos += 136
	}
	// In case if there was some trash at the end of data or index file, this should truncate it:
	db.blockindx.Seek(db.maxidxfilepos, os.SEEK_SET)

	db.blockdata, _ = os.OpenFile(db.dat_fname(db.maxdatfileidx, false), os.O_RDWR|os.O_CREATE, 0660)
	if db.blockdata == nil {
		panic("Cannot open blockchain.dat")
	}

	db.blockdata.Seek(db.maxdatfilepos, os.SEEK_SET)

	// remove (or backup) the old .dat files before continuing
	if db.data_files_keep != 0 && db.maxdatfileidx > db.data_files_keep {
		idx := db.maxdatfileidx - db.data_files_keep
		for limit := 0; limit < 3; limit++ {
			idx--
			fn := db.dat_fname(idx, false)
			if fi, er := os.Stat(fn); er == nil && fi.Mode().IsRegular() {
				db.data_files_done.Add(1)
				//println("getting rid of", fn, "...")
				db.removeDatFile(idx) // we're not using background process here
			} else {
				os.Remove(fn + ".tmp")
			}
			if idx == 0 {
				break
			}
		}
	}

	return
}