github.com/piotrnar/gocoin@v0.0.0-20240512203912-faa0448c5e96/tools/bdb/bdb.go (about) 1 package main 2 3 import ( 4 "bytes" 5 "compress/gzip" 6 "encoding/binary" 7 "encoding/hex" 8 "errors" 9 "flag" 10 "fmt" 11 "io" 12 "io/ioutil" 13 "os" 14 "os/signal" 15 "strconv" 16 "strings" 17 "syscall" 18 19 "github.com/piotrnar/gocoin/lib/btc" 20 "github.com/piotrnar/gocoin/lib/chain" 21 "github.com/piotrnar/gocoin/lib/others/snappy" 22 ) 23 24 /* 25 blockchain.dat - contains raw blocks data, no headers, nothing 26 blockchain.new - contains records of 136 bytes (all values LSB): 27 [0] - flags: 28 bit(0) - "trusted" flag - this block's scripts have been verified 29 bit(1) - "invalid" flag - this block's scripts have failed 30 bit(2) - "compressed" flag - this block's data is compressed 31 bit(3) - "snappy" flag - this block is compressed with snappy (not gzip'ed) 32 bit(4) - if this bit is set, bytes [32:36] carry length of uncompressed block 33 bit(5) - if this bit is set, bytes [28:32] carry data file index 34 35 Used to be: 36 [4:36] - 256-bit block hash - DEPRECATED! 
(hash the header to get the value) 37 38 [4:28] - reserved 39 [28:32] - specifies which blockchain.dat file is used (if not zero, the filename is: blockchain-%08x.dat) 40 [32:36] - length of uncompressed block 41 42 [36:40] - 32-bit block height (genesis is 0) 43 [40:48] - 64-bit block pos in blockchain.dat file 44 [48:52] - 32-bit block lenght in bytes 45 [52:56] - 32-bit number of transaction in the block 46 [56:136] - 80 bytes blocks header 47 */ 48 49 const ( 50 TRUSTED = 0x01 51 INVALID = 0x02 52 ) 53 54 var ( 55 fl_help bool 56 fl_block, fl_stop uint 57 fl_dir string 58 fl_scan, fl_defrag bool 59 fl_split string 60 fl_skip uint 61 fl_append string 62 fl_trunc bool 63 fl_commit, fl_verify bool 64 fl_savebl string 65 fl_purgeall bool 66 fl_purgeto uint 67 fl_from, fl_to uint 68 fl_trusted int 69 fl_invalid int 70 fl_fixlen bool 71 fl_fixlenall bool 72 73 fl_mergedat uint 74 fl_movedat uint 75 76 fl_splitdat int 77 fl_mb uint 78 79 fl_datidx int 80 81 fl_purgedatidx bool 82 fl_rendat bool 83 84 fl_ord string 85 fl_ox bool 86 87 fl_compress string 88 89 buf [5 * 1024 * 1024]byte // 5MB should be anough 90 ) 91 92 /********************************************************/ 93 type one_idx_rec struct { 94 sl []byte 95 hash [32]byte 96 } 97 98 func new_sl(sl []byte) (r one_idx_rec) { 99 r.sl = sl[:136] 100 btc.ShaHash(sl[56:136], r.hash[:]) 101 return 102 } 103 104 func (r one_idx_rec) Flags() uint32 { 105 return binary.LittleEndian.Uint32(r.sl[0:4]) 106 } 107 108 func (r one_idx_rec) Height() uint32 { 109 return binary.LittleEndian.Uint32(r.sl[36:40]) 110 } 111 112 func (r one_idx_rec) DPos() uint64 { 113 return binary.LittleEndian.Uint64(r.sl[40:48]) 114 } 115 116 func (r one_idx_rec) SetDPos(dp uint64) { 117 binary.LittleEndian.PutUint64(r.sl[40:48], dp) 118 } 119 120 func (r one_idx_rec) DLen() uint32 { 121 return binary.LittleEndian.Uint32(r.sl[48:52]) 122 } 123 124 func (r one_idx_rec) Size() uint32 { 125 return binary.LittleEndian.Uint32(r.sl[32:36]) 126 } 127 
128 func (r one_idx_rec) SetDLen(l uint32) { 129 binary.LittleEndian.PutUint32(r.sl[48:52], l) 130 } 131 132 func (r one_idx_rec) SetDatIdx(l uint32) { 133 r.sl[0] |= 0x20 134 binary.LittleEndian.PutUint32(r.sl[28:32], l) 135 } 136 137 func (r one_idx_rec) Hash() []byte { 138 return r.hash[:] 139 } 140 141 func (r one_idx_rec) HIdx() (h [32]byte) { 142 copy(h[:], r.hash[:]) 143 return 144 } 145 146 func (r one_idx_rec) Parent() []byte { 147 return r.sl[60:92] 148 } 149 150 func (r one_idx_rec) PIdx() [32]byte { 151 var h [32]byte 152 copy(h[:], r.sl[60:92]) 153 return h 154 } 155 156 func (r one_idx_rec) DatIdx() uint32 { 157 if (r.sl[0] & 0x20) != 0 { 158 return binary.LittleEndian.Uint32(r.sl[28:32]) 159 } 160 return 0 161 } 162 163 /********************************************************/ 164 165 type one_tree_node struct { 166 off int // offset in teh idx file 167 one_idx_rec 168 parent *one_tree_node 169 next *one_tree_node 170 } 171 172 /********************************************************/ 173 174 func print_record(sl []byte) { 175 var dat_idx uint32 176 if (sl[0] & 0x20) != 0 { 177 dat_idx = binary.LittleEndian.Uint32(sl[28:32]) 178 } 179 bh := btc.NewSha2Hash(sl[56:136]) 180 fmt.Println("Block", bh.String()) 181 fmt.Println(" ... 
Height", binary.LittleEndian.Uint32(sl[36:40]), 182 " - ", binary.LittleEndian.Uint32(sl[48:52]), "bytes @", 183 binary.LittleEndian.Uint64(sl[40:48]), "in", dat_fname(dat_idx)) 184 fmt.Print(" Flags: ", fmt.Sprintf("0x%02x", sl[0]), " ") 185 for i, s := range []string{"TRUST", "INVAL", "COMPR", "SNAPY", "LNGTH", "INDEX"} { 186 if (sl[0] & (1 << i)) != 0 { 187 fmt.Print(" ", s) 188 } 189 } 190 fmt.Println() 191 if (sl[0] & chain.BLOCK_LENGTH) != 0 { 192 fmt.Println(" Uncompressed length:", 193 binary.LittleEndian.Uint32(sl[32:36]), "bytes") 194 } 195 if (sl[0] & chain.BLOCK_INDEX) != 0 { 196 fmt.Println(" Data file index:", dat_idx) 197 } 198 hdr := sl[56:136] 199 fmt.Println(" ->", btc.NewUint256(hdr[4:36]).String()) 200 } 201 202 func verify_block(blk []byte, sl one_idx_rec, off int) { 203 bl, er := btc.NewBlock(blk) 204 if er != nil { 205 println("\nERROR verify_block", sl.Height(), btc.NewUint256(sl.Hash()).String(), er.Error()) 206 return 207 } 208 if !bytes.Equal(bl.Hash.Hash[:], sl.Hash()) { 209 println("\nERROR verify_block", sl.Height(), btc.NewUint256(sl.Hash()).String(), "Header invalid") 210 return 211 } 212 213 er = bl.BuildTxList() 214 if er != nil { 215 println("\nERROR verify_block", sl.Height(), btc.NewUint256(sl.Hash()).String(), er.Error()) 216 return 217 } 218 219 merk, _ := bl.GetMerkle() 220 if !bytes.Equal(bl.MerkleRoot(), merk) { 221 println("\nERROR verify_block", sl.Height(), btc.NewUint256(sl.Hash()).String(), "Payload invalid / Merkle mismatch") 222 return 223 } 224 } 225 226 func decomp_block(fl uint32, buf []byte) (blk []byte) { 227 if (fl & chain.BLOCK_COMPRSD) != 0 { 228 if (fl & chain.BLOCK_SNAPPED) != 0 { 229 blk, _ = snappy.Decode(nil, buf) 230 } else { 231 gz, _ := gzip.NewReader(bytes.NewReader(buf)) 232 blk, _ = ioutil.ReadAll(gz) 233 gz.Close() 234 } 235 } else { 236 blk = buf 237 } 238 return 239 } 240 241 // look_for_range looks for the first and last records with the given index. 
242 func look_for_range(dat []byte, _idx uint32) (min_valid_off, max_valid_off int) { 243 min_valid_off = -1 244 for off := 0; off < len(dat); off += 136 { 245 sl := new_sl(dat[off:]) 246 idx := sl.DatIdx() 247 if sl.DLen() > 0 { 248 if idx == _idx { 249 if min_valid_off == -1 { 250 min_valid_off = off 251 } 252 max_valid_off = off 253 } else if min_valid_off != -1 { 254 break 255 } 256 } 257 } 258 return 259 } 260 261 func dat_fname(idx uint32) (fn string) { 262 if idx == 0 { 263 fn = "blockchain.dat" 264 } else { 265 fn = fmt.Sprintf("blockchain-%08x.dat", idx) 266 } 267 if _, er := os.Stat(fn); er != nil { 268 fn = fmt.Sprintf("bl%08d.dat", idx) 269 } 270 return 271 } 272 273 func split_the_data_file(parent_f *os.File, idx uint32, maxlen uint64, dat []byte, min_valid_off, max_valid_off int) bool { 274 fname := dat_fname(idx) 275 276 if fi, _ := os.Stat(fname); fi != nil { 277 fmt.Println(fi.Name(), "exist - get rid of it first") 278 return false 279 } 280 281 rec_from := new_sl(dat[min_valid_off : min_valid_off+136]) 282 pos_from := rec_from.DPos() 283 284 for off := min_valid_off; off <= max_valid_off; off += 136 { 285 rec := new_sl(dat[off : off+136]) 286 if rec.DLen() == 0 { 287 continue 288 } 289 dpos := rec.DPos() 290 if dpos-pos_from+uint64(rec.DLen()) > maxlen { 291 if !split_the_data_file(parent_f, idx+1, maxlen, dat, off, max_valid_off) { 292 return false // abort spliting 293 } 294 //println("truncate parent at", dpos) 295 er := parent_f.Truncate(int64(dpos)) 296 if er != nil { 297 println(er.Error()) 298 } 299 max_valid_off = off - 136 300 break // go to the next stage 301 } 302 } 303 304 // at this point parent_f should be truncated 305 f, er := os.Create(fname) 306 if er != nil { 307 fmt.Println(er.Error()) 308 return false 309 } 310 311 parent_f.Seek(int64(pos_from), os.SEEK_SET) 312 for { 313 n, _ := parent_f.Read(buf[:]) 314 if n > 0 { 315 f.Write(buf[:n]) 316 } 317 if n != len(buf) { 318 break 319 } 320 } 321 322 //println(".. 
child split", fname, "at offs", min_valid_off/136, "...", max_valid_off/136, "fpos:", pos_from, " maxlen:", maxlen) 323 for off := min_valid_off; off <= max_valid_off; off += 136 { 324 sl := new_sl(dat[off : off+136]) 325 sl.SetDatIdx(idx) 326 sl.SetDPos(sl.DPos() - pos_from) 327 } 328 // flush blockchain.new to disk wicth each noe split for safety 329 ioutil.WriteFile("blockchain.tmp", dat, 0600) 330 os.Rename("blockchain.tmp", "blockchain.new") 331 332 return true 333 } 334 335 func calc_total_size(dat []byte) (res uint64) { 336 for off := 0; off < len(dat); off += 136 { 337 sl := new_sl(dat[off : off+136]) 338 res += uint64(sl.DLen()) 339 } 340 return 341 } 342 343 func open_dat_file(idx uint32) (f *os.File, er error) { 344 f, er = os.Open(fl_dir + dat_fname(idx)) 345 if er != nil { 346 f, er = os.Open(fl_dir + "oldat" + string(os.PathSeparator) + dat_fname(idx)) 347 } 348 return 349 } 350 351 // ExtractOrdFile extracts the file (inscription) stored inside the segwit data 352 // ... as per github.com/casey/ord 353 // 354 // p - is the segwith data returned by transaction's ContainsOrdFile() 355 // 356 // returns file type and the file itself 357 func ExtractOrdFile(p []byte) (typ string, data []byte, e error) { 358 var opcode_idx int 359 var byte_idx int 360 361 for byte_idx < len(p) { 362 opcode, vchPushValue, n, er := btc.GetOpcode(p[byte_idx:]) 363 if er != nil { 364 e = errors.New("ExtractOrdinaryFile: " + er.Error()) 365 return 366 } 367 368 byte_idx += n 369 370 switch opcode_idx { 371 case 0: 372 if len(vchPushValue) != 32 { 373 e = errors.New("opcode_idx 0: No push data 32 bytes") 374 return 375 } 376 case 1: 377 if opcode != btc.OP_CHECKSIG { 378 e = errors.New("opcode_idx 1: OP_CHECKSIG missing") 379 return 380 } 381 case 2: 382 if opcode != btc.OP_FALSE { 383 e = errors.New("opcode_idx 2: OP_FALSE missing") 384 return 385 } 386 case 3: 387 if opcode != btc.OP_IF { 388 e = errors.New("opcode_idx 3: OP_IF missing") 389 return 390 } 391 case 4: 392 if 
len(vchPushValue) != 3 || string(vchPushValue) != "ord" { 393 e = errors.New("opcode_idx 4: missing ord string") 394 return 395 } 396 case 5: 397 if len(vchPushValue) != 1 || vchPushValue[0] != 1 { 398 //println("opcode_idx 5:", hex.EncodeToString(vchPushValue), string(vchPushValue), "-ignore") 399 opcode_idx-- // ignore this one 400 } 401 case 6: 402 typ = string(vchPushValue) 403 case 7: 404 if opcode != btc.OP_FALSE { 405 if len(vchPushValue) == 1 || vchPushValue[0] == 7 { 406 break 407 } 408 e = errors.New("opcode_idx 7: OP_FALSE missing") 409 return 410 } 411 default: 412 if opcode == btc.OP_ENDIF { 413 return 414 } 415 data = append(data, vchPushValue...) 416 } 417 418 opcode_idx++ 419 } 420 return 421 } 422 423 func main() { 424 flag.BoolVar(&fl_help, "h", false, "Show help") 425 flag.UintVar(&fl_block, "block", 0, "Print details of the given block number (or start -verify from it)") 426 flag.BoolVar(&fl_scan, "scan", false, "Scan database for first extra blocks") 427 flag.BoolVar(&fl_defrag, "defrag", false, "Purge all the orphaned blocks") 428 flag.UintVar(&fl_stop, "stop", 0, "Stop after so many scan errors") 429 flag.StringVar(&fl_dir, "dir", "", "Use blockdb from this directory") 430 flag.StringVar(&fl_split, "split", "", "Split blockdb at this block's hash") 431 flag.UintVar(&fl_skip, "skip", 0, "Skip this many blocks when splitting") 432 flag.StringVar(&fl_append, "append", "", "Append blocks from this folder to the database") 433 flag.BoolVar(&fl_trunc, "trunc", false, "Truncate insted of splitting") 434 flag.BoolVar(&fl_commit, "commit", false, "Optimize the size of the data file") 435 flag.BoolVar(&fl_verify, "verify", false, "Verify each block inside the database") 436 flag.StringVar(&fl_savebl, "savebl", "", "Save block with the given hash to disk") 437 flag.BoolVar(&fl_purgeall, "purgeall", false, "Purge all blocks from the database") 438 flag.UintVar(&fl_purgeto, "purgeto", 0, "Purge all blocks till (but excluding) the given height") 439 440 
flag.UintVar(&fl_from, "from", 0, "Set/clear flag from this block") 441 flag.UintVar(&fl_to, "to", 0xffffffff, "Set/clear flag to this block or merge/rename into this data file index") 442 flag.IntVar(&fl_invalid, "invalid", -1, "Set (1) or clear (0) INVALID flag") 443 flag.IntVar(&fl_trusted, "trusted", -1, "Set (1) or clear (0) TRUSTED flag") 444 445 flag.BoolVar(&fl_fixlen, "fixlen", false, "Calculate (fix) orignial length of last 144 blocks") 446 flag.BoolVar(&fl_fixlenall, "fixlenall", false, "Calculate (fix) orignial length of each block") 447 448 flag.UintVar(&fl_mergedat, "mergedat", 0, "Merge this data file index into the data file specified by -to <idx>") 449 flag.UintVar(&fl_movedat, "movedat", 0, "Rename this data file index into the data file specified by -to <idx>") 450 451 flag.IntVar(&fl_splitdat, "splitdat", -1, "Split this data file into smaller parts (-mb <mb>)") 452 flag.UintVar(&fl_mb, "mb", 1000, "Split big data file into smaller parts of this size in MB (at least 8 MB)") 453 454 flag.IntVar(&fl_datidx, "datidx", -1, "Show records with the specific data file index") 455 456 flag.BoolVar(&fl_purgedatidx, "purgedatidx", false, "Remove reerence to dat files which are not on disk") 457 458 flag.BoolVar(&fl_rendat, "rendat", false, "Rename all blockchain*.dat files to the new format (blNNNNNNNN.dat)") 459 460 flag.StringVar(&fl_ord, "ord", "", "Analyse ord inscriptions of the given blocks (specify number or range)") 461 flag.BoolVar(&fl_ox, "ox", false, "Extract ord inscriptions instead of analysing (use with -ord))") 462 463 flag.StringVar(&fl_compress, "compress", "", "Compress all the blocks inside the given blxxxxxxxx.dat file") 464 465 flag.Parse() 466 467 if fl_help { 468 flag.PrintDefaults() 469 return 470 } 471 472 if fl_dir != "" && fl_dir[len(fl_dir)-1] != os.PathSeparator { 473 fl_dir += string(os.PathSeparator) 474 } 475 476 if fl_append != "" { 477 if fl_append[len(fl_append)-1] != os.PathSeparator { 478 fl_append += 
string(os.PathSeparator) 479 } 480 fmt.Println("Loading", fl_append+"blockchain.new") 481 dat, er := ioutil.ReadFile(fl_append + "blockchain.new") 482 if er != nil { 483 fmt.Println(er.Error()) 484 return 485 } 486 487 f, er := os.Open(fl_append + "blockchain.dat") 488 if er != nil { 489 fmt.Println(er.Error()) 490 return 491 } 492 493 fo, er := os.OpenFile(fl_dir+"blockchain.dat", os.O_WRONLY, 0600) 494 if er != nil { 495 f.Close() 496 fmt.Println(er.Error()) 497 return 498 } 499 datfilelen, _ := fo.Seek(0, os.SEEK_END) 500 501 fmt.Println("Appending blocks data to blockchain.dat") 502 for { 503 n, _ := f.Read(buf[:]) 504 if n > 0 { 505 fo.Write(buf[:n]) 506 } 507 if n != len(buf) { 508 break 509 } 510 } 511 fo.Close() 512 f.Close() 513 514 fmt.Println("Now appending", len(dat)/136, "records to blockchain.new") 515 fo, er = os.OpenFile(fl_dir+"blockchain.new", os.O_WRONLY, 0600) 516 if er != nil { 517 f.Close() 518 fmt.Println(er.Error()) 519 return 520 } 521 fo.Seek(0, os.SEEK_END) 522 523 for off := 0; off < len(dat); off += 136 { 524 sl := dat[off : off+136] 525 newoffs := binary.LittleEndian.Uint64(sl[40:48]) + uint64(datfilelen) 526 binary.LittleEndian.PutUint64(sl[40:48], newoffs) 527 fo.Write(sl) 528 } 529 fo.Close() 530 531 return 532 } 533 534 fmt.Println("Loading", fl_dir+"blockchain.new") 535 dat, er := ioutil.ReadFile(fl_dir + "blockchain.new") 536 if er != nil { 537 fmt.Println(er.Error()) 538 return 539 } 540 541 fmt.Println(len(dat)/136, "records") 542 543 if fl_rendat { 544 idxs_done := make(map[uint32]bool) 545 for off := 0; off < len(dat); off += 136 { 546 rec := new_sl(dat[off : off+136]) 547 idx := rec.DatIdx() 548 if !idxs_done[idx] { 549 fn := dat_fname(idx) 550 if strings.HasPrefix(fn, "blockchain") { 551 newfn := fmt.Sprintf("bl%08d.dat", idx) 552 //println("rename", fl_dir+fn, "to", fl_dir+newfn) 553 os.Rename(fl_dir+fn, fl_dir+newfn) 554 } 555 idxs_done[idx] = true 556 } 557 } 558 fmt.Println("All dat files have the new names now") 559 
return 560 } 561 562 if fl_mergedat != 0 { 563 if fl_to >= fl_mergedat { 564 fmt.Println("To index must be lower than from index") 565 return 566 } 567 min_valid_from, max_valid_from := look_for_range(dat, uint32(fl_mergedat)) 568 if min_valid_from == -1 { 569 fmt.Println("Invalid from index") 570 return 571 } 572 573 from_fn := dat_fname(uint32(fl_mergedat)) 574 to_fn := dat_fname(uint32(fl_to)) 575 576 f, er := os.Open(from_fn) 577 if er != nil { 578 fmt.Println(er.Error()) 579 return 580 } 581 582 fo, er := os.OpenFile(to_fn, os.O_WRONLY, 0600) 583 if er != nil { 584 f.Close() 585 fmt.Println(er.Error()) 586 return 587 } 588 offset_to_add, _ := fo.Seek(0, os.SEEK_END) 589 590 fmt.Println("Appending", from_fn, "to", to_fn, "at offset", offset_to_add) 591 for { 592 n, _ := f.Read(buf[:]) 593 if n > 0 { 594 fo.Write(buf[:n]) 595 } 596 if n != len(buf) { 597 break 598 } 599 } 600 fo.Close() 601 f.Close() 602 603 var cnt int 604 for off := min_valid_from; off <= max_valid_from; off += 136 { 605 sl := dat[off : off+136] 606 fpos := binary.LittleEndian.Uint64(sl[40:48]) 607 fpos += uint64(offset_to_add) 608 binary.LittleEndian.PutUint64(sl[40:48], fpos) 609 sl[0] |= 0x20 610 binary.LittleEndian.PutUint32(sl[28:32], uint32(fl_to)) 611 cnt++ 612 } 613 ioutil.WriteFile("blockchain.tmp", dat, 0600) 614 os.Rename("blockchain.tmp", "blockchain.new") 615 os.Remove(from_fn) 616 fmt.Println(from_fn, "removed and", cnt, "records updated in blockchain.new") 617 return 618 } 619 620 if fl_movedat != 0 { 621 if fl_to == fl_movedat { 622 fmt.Println("To index must be different than from index") 623 return 624 } 625 min_valid, max_valid := look_for_range(dat, uint32(fl_movedat)) 626 if min_valid == -1 { 627 fmt.Println("Invalid from index") 628 return 629 } 630 to_fn := dat_fname(uint32(fl_to)) 631 632 if fi, _ := os.Stat(to_fn); fi != nil { 633 fmt.Println(fi.Name(), "exist - get rid of it first") 634 return 635 } 636 637 from_fn := dat_fname(uint32(fl_movedat)) 638 639 // first 
discard all the records with the target index 640 for off := 0; off < len(dat); off += 136 { 641 rec := new_sl(dat[off : off+136]) 642 if rec.DatIdx() == uint32(fl_to) { 643 rec.SetDLen(0) 644 rec.SetDatIdx(0xffffffff) 645 } 646 } 647 648 // now set the new index 649 var cnt int 650 for off := min_valid; off <= max_valid; off += 136 { 651 sl := dat[off : off+136] 652 sl[0] |= 0x20 653 binary.LittleEndian.PutUint32(sl[28:32], uint32(fl_to)) 654 cnt++ 655 } 656 ioutil.WriteFile("blockchain.tmp", dat, 0600) 657 os.Rename(from_fn, to_fn) 658 os.Rename("blockchain.tmp", "blockchain.new") 659 fmt.Println(from_fn, "renamed to ", to_fn, "and", cnt, "records updated in blockchain.new") 660 return 661 } 662 663 if fl_splitdat >= 0 { 664 if fl_mb < 8 { 665 fmt.Println("Minimal value of -mb parameter is 8") 666 return 667 } 668 fname := dat_fname(uint32(fl_splitdat)) 669 fmt.Println("Spliting file", fname, "into chunks - up to", fl_mb, "MB...") 670 min_valid_off, max_valid_off := look_for_range(dat, uint32(fl_splitdat)) 671 f, er := os.OpenFile(fname, os.O_RDWR, 0600) 672 if er != nil { 673 fmt.Println(er.Error()) 674 return 675 } 676 defer f.Close() 677 //fmt.Println("Range:", min_valid_off/136, "...", max_valid_off/136) 678 679 maxlen := uint64(fl_mb) << 20 680 for off := min_valid_off; off <= max_valid_off; off += 136 { 681 rec := new_sl(dat[off : off+136]) 682 if rec.DLen() == 0 { 683 continue 684 } 685 dpos := rec.DPos() 686 if dpos+uint64(rec.DLen()) > maxlen { 687 //println("root split from", dpos) 688 if !split_the_data_file(f, uint32(fl_splitdat)+1, maxlen, dat, off, max_valid_off) { 689 fmt.Println("Splitting failed") 690 return 691 } 692 f.Truncate(int64(dpos)) 693 fmt.Println("Splitting succeeded") 694 return 695 } 696 } 697 fmt.Println("There was nothing to split") 698 return 699 } 700 701 if fl_datidx >= 0 { 702 fname := dat_fname(uint32(fl_datidx)) 703 min_valid_off, max_valid_off := look_for_range(dat, uint32(fl_datidx)) 704 if min_valid_off == -1 { 705 
fmt.Println(fname, "is not used by any record") 706 return 707 } 708 fmt.Println(fname, "is used by", (max_valid_off-min_valid_off)/136+1, "records. From", min_valid_off/136, "to", max_valid_off/136) 709 fmt.Println("Block height from", new_sl(dat[min_valid_off:]).Height(), "to", new_sl(dat[max_valid_off:]).Height()) 710 return 711 } 712 713 if fl_purgedatidx { 714 cache := make(map[uint32]bool) 715 var cnt int 716 for off := 0; off < len(dat); off += 136 { 717 rec := new_sl(dat[off:]) 718 if rec.DLen() == 0 && rec.DatIdx() == 0xffffffff { 719 continue 720 } 721 idx := rec.DatIdx() 722 have_file, ok := cache[idx] 723 if !ok { 724 fi, _ := os.Stat(dat_fname(idx)) 725 have_file = fi != nil 726 cache[idx] = have_file 727 } 728 if !have_file { 729 rec.SetDatIdx(0xffffffff) 730 rec.SetDLen(0) 731 cnt++ 732 } 733 } 734 if cnt > 0 { 735 ioutil.WriteFile("blockchain.tmp", dat, 0600) 736 os.Rename("blockchain.tmp", "blockchain.new") 737 fmt.Println(cnt, "records removed from blockchain.new") 738 } else { 739 fmt.Println("Data files seem consisent - no need to remove anything") 740 } 741 return 742 } 743 744 if fl_invalid == 0 || fl_invalid == 1 || fl_trusted == 0 || fl_trusted == 1 { 745 var cnt uint64 746 for off := 0; off < len(dat); off += 136 { 747 sl := dat[off : off+136] 748 if uint(binary.LittleEndian.Uint32(sl[36:40])) < fl_from { 749 continue 750 } 751 if uint(binary.LittleEndian.Uint32(sl[36:40])) > fl_to { 752 continue 753 } 754 if fl_invalid == 0 { 755 if (sl[0] & INVALID) != 0 { 756 sl[0] &= ^byte(INVALID) 757 cnt++ 758 } 759 } else if fl_invalid == 1 { 760 if (sl[0] & INVALID) == 0 { 761 sl[0] |= INVALID 762 cnt++ 763 } 764 } 765 if fl_trusted == 0 { 766 if (sl[0] & TRUSTED) != 0 { 767 sl[0] &= ^byte(TRUSTED) 768 cnt++ 769 } 770 } else if fl_trusted == 1 { 771 if (sl[0] & TRUSTED) == 0 { 772 sl[0] |= TRUSTED 773 cnt++ 774 } 775 } 776 } 777 ioutil.WriteFile("blockchain.tmp", dat, 0600) 778 os.Rename("blockchain.tmp", "blockchain.new") 779 fmt.Println(cnt, 
"flags updated in blockchain.new") 780 } 781 782 if fl_purgeall { 783 for off := 0; off < len(dat); off += 136 { 784 sl := dat[off : off+136] 785 binary.LittleEndian.PutUint64(sl[40:48], 0) 786 binary.LittleEndian.PutUint32(sl[48:52], 0) 787 } 788 ioutil.WriteFile("blockchain.tmp", dat, 0600) 789 os.Rename("blockchain.tmp", "blockchain.new") 790 fmt.Println("blockchain.new upated. Now delete blockchain.dat yourself...") 791 } 792 793 if fl_purgeto != 0 { 794 var cur_dat_pos uint64 795 796 f, er := os.Open("blockchain.dat") 797 if er != nil { 798 println(er.Error()) 799 return 800 } 801 defer f.Close() 802 803 newdir := fmt.Sprint("purged_to_", fl_purgeto, string(os.PathSeparator)) 804 os.Mkdir(newdir, os.ModePerm) 805 806 o, er := os.Create(newdir + "blockchain.dat") 807 if er != nil { 808 println(er.Error()) 809 return 810 } 811 defer o.Close() 812 813 for off := 0; off < len(dat); off += 136 { 814 sl := new_sl(dat[off : off+136]) 815 816 if uint(sl.Height()) < fl_purgeto { 817 sl.SetDLen(0) 818 sl.SetDPos(0) 819 } else { 820 blen := int(sl.DLen()) 821 f.Seek(int64(sl.DPos()), os.SEEK_SET) 822 _, er = io.ReadFull(f, buf[:blen]) 823 if er != nil { 824 println(er.Error()) 825 return 826 } 827 sl.SetDPos(cur_dat_pos) 828 cur_dat_pos += uint64(blen) 829 o.Write(buf[:blen]) 830 } 831 } 832 ioutil.WriteFile(newdir+"blockchain.new", dat, 0600) 833 return 834 } 835 836 if fl_scan { 837 var scan_errs uint 838 last_bl_height := binary.LittleEndian.Uint32(dat[36:40]) 839 exp_offset := uint64(binary.LittleEndian.Uint32(dat[48:52])) 840 fmt.Println("Scanning database for first extra block(s)...") 841 fmt.Println("First block in the file has height", last_bl_height) 842 for off := 136; off < len(dat); off += 136 { 843 sl := dat[off : off+136] 844 height := binary.LittleEndian.Uint32(sl[36:40]) 845 off_in_bl := binary.LittleEndian.Uint64(sl[40:48]) 846 847 if height != last_bl_height+1 { 848 fmt.Println("Out of sequence block number", height, last_bl_height+1, "found at offset", 
off) 849 print_record(dat[off-136 : off]) 850 print_record(dat[off : off+136]) 851 fmt.Println() 852 scan_errs++ 853 } 854 if off_in_bl != exp_offset { 855 fmt.Println("Spare data found just before block number", height, off_in_bl, exp_offset) 856 print_record(dat[off-136 : off]) 857 print_record(dat[off : off+136]) 858 scan_errs++ 859 } 860 861 if fl_stop != 0 && scan_errs >= fl_stop { 862 break 863 } 864 865 last_bl_height = height 866 867 exp_offset += uint64(binary.LittleEndian.Uint32(sl[48:52])) 868 } 869 return 870 } 871 872 if fl_defrag { 873 blks := make(map[[32]byte]*one_tree_node, len(dat)/136) 874 for off := 0; off < len(dat); off += 136 { 875 sl := new_sl(dat[off : off+136]) 876 blks[sl.HIdx()] = &one_tree_node{off: off, one_idx_rec: sl} 877 } 878 var maxbl uint32 879 var maxblptr *one_tree_node 880 for _, v := range blks { 881 v.parent = blks[v.PIdx()] 882 h := v.Height() 883 if h > maxbl { 884 maxbl = h 885 maxblptr = v 886 } else if h == maxbl { 887 maxblptr = nil 888 } 889 } 890 fmt.Println("Max block height =", maxbl) 891 if maxblptr == nil { 892 fmt.Println("More than one block at maximum height - cannot continue") 893 return 894 } 895 used := make(map[[32]byte]bool) 896 var first_block *one_tree_node 897 var total_data_size uint64 898 for n := maxblptr; n != nil; n = n.parent { 899 if n.parent != nil { 900 n.parent.next = n 901 } 902 used[n.PIdx()] = true 903 if first_block == nil || first_block.Height() > n.Height() { 904 first_block = n 905 } 906 total_data_size += uint64(n.DLen()) 907 } 908 if len(used) < len(blks) { 909 fmt.Println("Purge", len(blks)-len(used), "blocks from the index file...") 910 f, e := os.Create(fl_dir + "blockchain.tmp") 911 if e != nil { 912 println(e.Error()) 913 return 914 } 915 var off int 916 for n := first_block; n != nil; n = n.next { 917 n.off = off 918 n.sl[0] = n.sl[0] & 0xfc 919 f.Write(n.sl) 920 off += len(n.sl) 921 } 922 f.Close() 923 os.Rename(fl_dir+"blockchain.tmp", fl_dir+"blockchain.new") 924 } else { 
925 fmt.Println("The index file looks perfect") 926 } 927 928 for n := first_block; n != nil && n.next != nil; n = n.next { 929 if n.next.DPos() < n.DPos() { 930 fmt.Println("There is a problem... swapped order in the data file!", n.off) 931 return 932 } 933 } 934 935 fdat, er := os.OpenFile(fl_dir+"blockchain.dat", os.O_RDWR, 0600) 936 if er != nil { 937 println(er.Error()) 938 return 939 } 940 941 if fl, _ := fdat.Seek(0, os.SEEK_END); uint64(fl) == total_data_size { 942 fdat.Close() 943 fmt.Println("All good - blockchain.dat has an optimal length") 944 return 945 } 946 947 if !fl_commit { 948 fdat.Close() 949 fmt.Println("Warning: blockchain.dat shall be defragmented. Use \"-defrag -commit\"") 950 return 951 } 952 953 fidx, er := os.OpenFile(fl_dir+"blockchain.new", os.O_RDWR, 0600) 954 if er != nil { 955 println(er.Error()) 956 return 957 } 958 959 // Capture Ctrl+C 960 killchan := make(chan os.Signal, 1) 961 signal.Notify(killchan, os.Interrupt, syscall.SIGTERM) 962 963 var doff uint64 964 var prv_perc uint64 = 101 965 for n := first_block; n != nil; n = n.next { 966 perc := 1000 * doff / total_data_size 967 dp := n.DPos() 968 dl := n.DLen() 969 if perc != prv_perc { 970 fmt.Printf("\rDefragmenting data file - %.1f%% (%d bytes saved so far)...", 971 float64(perc)/10.0, dp-doff) 972 prv_perc = perc 973 } 974 if dp > doff { 975 fdat.Seek(int64(dp), os.SEEK_SET) 976 fdat.Read(buf[:int(dl)]) 977 978 n.SetDPos(doff) 979 980 fdat.Seek(int64(doff), os.SEEK_SET) 981 fdat.Write(buf[:int(dl)]) 982 983 fidx.Seek(int64(n.off), os.SEEK_SET) 984 fidx.Write(n.sl) 985 } 986 doff += uint64(dl) 987 988 select { 989 case <-killchan: 990 fmt.Println("interrupted") 991 fidx.Close() 992 fdat.Close() 993 fmt.Println("Database closed - should be still usable, but no space saved") 994 return 995 default: 996 } 997 } 998 999 fidx.Close() 1000 fdat.Close() 1001 fmt.Println() 1002 1003 fmt.Println("Truncating blockchain.dat at position", doff) 1004 os.Truncate(fl_dir+"blockchain.dat", 
int64(doff)) 1005 1006 return 1007 } 1008 1009 if fl_verify { 1010 var prv_perc uint64 = 0xffffffffff 1011 var totlen uint64 1012 var dat_file_open uint32 = 0xffffffff 1013 var fdat *os.File 1014 var cnt, cnt_nd, cnt_err int 1015 var cur_progress uint64 1016 1017 total_data_size := calc_total_size(dat) 1018 1019 for off := 0; off < len(dat); off += 136 { 1020 sl := new_sl(dat[off : off+136]) 1021 1022 le := int(sl.DLen()) 1023 if le == 0 { 1024 continue 1025 } 1026 cur_progress += uint64(sl.DLen()) 1027 1028 hei := uint(sl.Height()) 1029 1030 if hei < fl_from { 1031 continue 1032 } 1033 1034 idx := sl.DatIdx() 1035 if idx == 0xffffffff { 1036 continue 1037 } 1038 1039 if idx != dat_file_open { 1040 var er error 1041 dat_file_open = idx 1042 if fdat != nil { 1043 fdat.Close() 1044 } 1045 fdat, er = os.OpenFile(fl_dir+dat_fname(idx), os.O_RDWR, 0600) 1046 if er != nil { 1047 //println(er.Error()) 1048 continue 1049 } 1050 } 1051 1052 perc := 1000 * cur_progress / total_data_size 1053 if perc != prv_perc { 1054 fmt.Printf("\rVerifying blocks data - %.1f%% @ %d / %dMB processed... 
idx:%d",
					float64(perc)/10.0, hei, totlen>>20, idx)
				prv_perc = perc
			}

			// -block acts as a lower bound here: skip verification below the requested height
			if fl_block != 0 && hei < fl_block {
				continue
			}

			dp := int64(sl.DPos())
			fdat.Seek(dp, os.SEEK_SET)
			n, _ := fdat.Read(buf[:le])
			if n != le {
				// short read - the block's data is not (fully) present in this dat file
				//fmt.Println("Block", hei, "not in dat file", idx, dp)
				cnt_nd++
				continue
			}

			blk := decomp_block(sl.Flags(), buf[:le])
			if blk == nil {
				fmt.Println("Block", hei, "decompression failed")
				cnt_err++
				continue
			}

			verify_block(blk, sl, off)
			cnt++

			totlen += uint64(len(blk))
		}
		if fdat != nil {
			fdat.Close()
		}
		fmt.Println("\nAll blocks done -", totlen>>20, "MB and", cnt, "blocks verified OK")
		fmt.Println("No data errors:", cnt_nd, " Decompression errors:", cnt_err)
		return
	}

	// -block: print every 136-byte index record whose height field (bytes [36:40]) matches
	if fl_block != 0 {
		for off := 0; off < len(dat); off += 136 {
			sl := dat[off : off+136]
			height := binary.LittleEndian.Uint32(sl[36:40])
			if uint(height) == fl_block {
				print_record(dat[off : off+136])
			}
		}
		return
	}

	// -split: cut the database at the block with the given hash. Unless -trunc is set,
	// everything from that block onwards is first copied into a new subdirectory
	// (named after the block's height) before both files are truncated in place.
	if fl_split != "" {
		th := btc.NewUint256FromString(fl_split)
		if th == nil {
			println("incorrect block hash")
			return
		}
		for off := 0; off < len(dat); off += 136 {
			sl := dat[off : off+136]
			height := binary.LittleEndian.Uint32(sl[36:40])
			// hash of the 80-byte header stored at [56:136] identifies the block
			bh := btc.NewSha2Hash(sl[56:136])
			if bh.Hash == th.Hash {
				trunc_idx_offs := int64(off)
				trunc_dat_offs := int64(binary.LittleEndian.Uint64(sl[40:48]))
				trunc_dat_idx := binary.LittleEndian.Uint32(sl[28:32])
				cur_dat_fname := dat_fname(trunc_dat_idx)
				fmt.Println("Truncate blockchain.new at offset", trunc_idx_offs)
				fmt.Println("Truncate", dat_fname(trunc_dat_idx), "at offset", trunc_dat_offs)
				if !fl_trunc {
					new_dir := fl_dir + fmt.Sprint(height) + string(os.PathSeparator)
					os.Mkdir(new_dir, os.ModePerm)

					// the carved-out data goes into the next dat-file index
					new_dat_idx := trunc_dat_idx + 1
					new_dat_fname := dat_fname(new_dat_idx)

					f, e := os.Open(fl_dir + cur_dat_fname)
					if e != nil {
						fmt.Println(e.Error())
						return
					}
					df, e := os.Create(new_dir + new_dat_fname)
					if e != nil {
						f.Close()
						fmt.Println(e.Error())
						return
					}

					f.Seek(trunc_dat_offs, os.SEEK_SET)

					// NOTE(review): "fist" in the message below is a typo for "first" (runtime string - fix separately)
					fmt.Println("But fist save the rest as", new_dir+new_dat_fname, "...")
					if fl_skip != 0 {
						// advance the index record pointer past -skip blocks; their data is
						// still copied below (only the seek position above excludes data before the split)
						fmt.Println("Skip", fl_skip, "blocks in the output file")
						for fl_skip > 0 {
							skipbytes := binary.LittleEndian.Uint32(sl[48:52])
							fmt.Println(" -", skipbytes, "bytes of block", binary.LittleEndian.Uint32(sl[36:40]))
							off += 136
							if off < len(dat) {
								sl = dat[off : off+136]
								fl_skip--
							} else {
								break
							}
						}
					}

					// copy the remainder of the dat file into the new directory
					for {
						n, _ := f.Read(buf[:])
						if n > 0 {
							df.Write(buf[:n])
						}
						if n != len(buf) {
							break
						}
					}
					df.Close()
					f.Close()

					df, e = os.Create(new_dir + "blockchain.new")
					if e != nil {
						// NOTE(review): f was already closed above - this second Close only returns an ignored error
						f.Close()
						fmt.Println(e.Error())
						return
					}
					var off2 int
					// rewrite the remaining index records: rebase data offsets and point them at the new dat file
					for off2 = off; off2 < len(dat); off2 += 136 {
						sl := dat[off2 : off2+136]
						newoffs := binary.LittleEndian.Uint64(sl[40:48]) - uint64(trunc_dat_offs)
						binary.LittleEndian.PutUint64(sl[40:48], newoffs)
						binary.LittleEndian.PutUint32(sl[28:32], new_dat_idx)
						df.Write(sl)
					}
					df.Close()
				}

				os.Truncate(fl_dir+"blockchain.new", trunc_idx_offs)
				os.Truncate(fl_dir+cur_dat_fname, trunc_dat_offs)
				return
			}
		}
		// NOTE(review): no return here - when the hash is not found, execution falls through
		// to the modes below; confirm this is intended
		fmt.Println("Block not found - nothing truncated")
	}

	// -savebl: locate the block with the given hash, decompress it and dump it to <hash>.bin
	if fl_savebl != "" {
		bh := btc.NewUint256FromString(fl_savebl)
		if bh == nil {
			// NOTE(review): "Incortrect" is a typo for "Incorrect" (runtime string - fix separately)
			println("Incortrect block hash:", fl_savebl)
			return
		}
		for off := 0; off < len(dat); off += 136 {
			sl := new_sl(dat[off : off+136])
			if bytes.Equal(sl.Hash(), bh.Hash[:]) {
				f, er := open_dat_file(sl.DatIdx())
				if er != nil {
					println(er.Error())
					return
				}
				bu := buf[:int(sl.DLen())]
				f.Seek(int64(sl.DPos()), os.SEEK_SET)
				f.Read(bu)
				f.Close()
				ioutil.WriteFile(bh.String()+".bin", decomp_block(sl.Flags(), bu), 0600)
				fmt.Println(bh.String()+".bin written to disk. It has height", sl.Height())
				return
			}
		}
		fmt.Println("Block", bh.String(), "not found in the database")
		return
	}

	// -fixlen / -fixlenall: fill in the uncompressed-length field (bytes [32:36]) for records
	// that have it zero, by decompressing each block, then rewrite blockchain.new.
	if fl_fixlen || fl_fixlenall {
		fdat, er := os.OpenFile(fl_dir+"blockchain.dat", os.O_RDWR, 0600)
		if er != nil {
			println(er.Error())
			return
		}

		dat_file_size, _ := fdat.Seek(0, os.SEEK_END)

		var prv_perc int64 = -1
		var totlen uint64
		var off int
		if !fl_fixlenall {
			// only revisit the last 144 records (one day's worth of blocks)
			// NOTE(review): goes negative when len(dat) < 144*136, which would panic on the slice below - confirm dat is always big enough
			off = len(dat) - 144*136
		}
		for ; off < len(dat); off += 136 {
			sl := new_sl(dat[off : off+136])
			olen := binary.LittleEndian.Uint32(sl.sl[32:36])
			if olen == 0 {
				// NOTE(review): this inner sl re-creation is redundant - it shadows the identical record built just above
				sl := new_sl(dat[off : off+136])
				dp := int64(sl.DPos())
				le := int(sl.DLen())

				perc := 1000 * dp / dat_file_size
				if perc != prv_perc {
					fmt.Printf("\rUpdating blocks length - %.1f%% / %dMB processed...",
						float64(perc)/10.0, totlen>>20)
					prv_perc = perc
				}

				fdat.Seek(dp, os.SEEK_SET)
				fdat.Read(buf[:le])
				blk := decomp_block(sl.Flags(), buf[:le])
				binary.LittleEndian.PutUint32(sl.sl[32:36], uint32(len(blk)))
				// bit(4): mark that bytes [32:36] now carry the uncompressed length (see file header comment)
				sl.sl[0] |= 0x10

				totlen += uint64(len(blk))
			}
		}
		// write the patched index atomically: temp file first, then rename over blockchain.new
		ioutil.WriteFile("blockchain.tmp", dat, 0600)
		os.Rename("blockchain.tmp", "blockchain.new")
		fmt.Println("blockchain.new updated")
	}

	// -compress: snappy-compress every not-yet-snappy block stored in the named dat file,
	// writing a .tmp replacement and updating the index records accordingly.
	if fl_compress != "" {
		var idx int
		if fl_compress == "blockchain.dat" {
			idx = 0
		} else if n, _ := fmt.Sscanf(fl_compress, "blockchain-%08x.dat", &idx); n == 1 {
			// old format
		} else if n, _ := fmt.Sscanf(fl_compress, "bl%08d.dat", &idx); n == 1 {
			// new format
		} else {
			println("The given filename does not match the pattern")
			return
		}
		fmt.Println("Compressing all blocks in data file with index", idx)

		fdat, er := os.OpenFile(fl_dir+fl_compress, os.O_RDONLY, 0600)
		if er != nil {
			println(er.Error())
			return
		}

		fdatnew, er := os.Create(fl_dir + fl_compress + ".tmp")
		if er != nil {
			println(er.Error())
			fdat.Close()
			return
		}

		var off int
		var done_cnt, ignored_cnt, recompd_cnt int
		// NOTE(review): "fdaynew_offs" looks like a typo for "fdatnew_offs" (local name only, no behavior impact)
		var fdaynew_offs uint64
		for ; off < len(dat); off += 136 {
			sl := new_sl(dat[off : off+136])
			if int(sl.DatIdx()) == idx {
				var cbts []byte
				fl := sl.Flags()
				blen := int(sl.DLen())
				fdat.Seek(int64(sl.DPos()), os.SEEK_SET)
				_, er = io.ReadFull(fdat, buf[:blen])
				if er != nil {
					println(er.Error())
					fdatnew.Close()
					fdat.Close()
					return
				}
				// flags bits 2+3 set = already snappy-compressed, copy through unchanged
				if (fl & 0x0c) == 0x0c {
					//println("Block", height, "is already compressed = copy over", size, "bytes at offset", offs)
					cbts = buf[:blen]
					ignored_cnt++
				} else {
					//println("Block", height, " - compressing", size, "bytes at offset", offs)
					var bb []byte
					if (fl & 4) == 4 {
						// gzip-compressed: decompress first, then re-compress with snappy
						//println("Block", sl.Height(), " - re-compressing")
						bb = decomp_block(fl, buf[:blen])
						recompd_cnt++
					} else {
						bb = buf[:blen]
						done_cnt++
					}
					cbts = snappy.Encode(nil, bb)
				}
				sl.sl[0] |= 0x0C // set snappy and compressed flag
				binary.LittleEndian.PutUint64(sl.sl[40:48], fdaynew_offs)
				binary.LittleEndian.PutUint32(sl.sl[48:52], uint32(len(cbts)))
				fdatnew.Write(cbts)
				fdaynew_offs += uint64(len(cbts))
			}
		}
		fdatnew.Close()
		fdat.Close()
		// NOTE(review): "comprtessed" is a typo for "compressed" (runtime string - fix separately)
		fmt.Println("Blocks comprtessed:", done_cnt, " re-compressed:", recompd_cnt, " ignored:", ignored_cnt)
		if done_cnt == 0 && recompd_cnt == 0 {
			fmt.Println("Nothing done")
			os.Remove(fl_dir + fl_compress + ".tmp")
		} else {
			ioutil.WriteFile("blockchain.tmp", dat, 0600)
			os.Rename("blockchain.tmp", "blockchain.new")
			os.Rename(fl_dir+fl_compress+".tmp", fl_dir+fl_compress)
			fmt.Println("blockchain.new updated")
			fmt.Println(fl_dir+fl_compress, "updated")
		}
		return
	}

	// -ord [from[-to]]: scan blocks in the given height range for ordinal inscriptions;
	// with -ox also extract the embedded files into ./ord/.
	if fl_ord != "" {
		var ofr, oto uint64

		xx := strings.Split(fl_ord, "-")
		ofr, er = strconv.ParseUint(xx[0], 10, 32)
		if er != nil || ofr < 767430 {
			ofr = 767430 // there are no files before this block (in bitcoin blockchain)
		}
		if len(xx) > 1 {
			oto, er = strconv.ParseUint(xx[1], 10, 32)
			if er != nil {
				oto = 0xffffffff
			}
		}
		if oto < ofr {
			if oto > 0 && oto < 100e3 {
				// small "to" value is interpreted as "the last N blocks" instead of a height
				fmt.Println("Checking ords from the last", oto, "blocks")
				sl := new_sl(dat[len(dat)-136:])
				ofr = uint64(sl.Height()) - oto + 1
				oto = uint64(sl.Height())
			} else {
				oto = ofr
			}
		}

		var tot_txs, tot_siz, tot_wht uint
		var tot_otxs, tot_osiz, tot_owht uint

		// NOTE(review): mode 0600 on a directory lacks the execute bit, so on Unix no files
		// can be created inside it - presumably this should be 0700; verify -ox output works
		os.Mkdir("ord", 0600)
		var ord_cnt uint64
		for off := 0; off < len(dat); off += 136 {
			sl := new_sl(dat[off : off+136])
			if he := uint64(sl.Height()); he >= ofr && he <= oto {
				f, er := open_dat_file(sl.DatIdx())
				if er != nil {
					println(er.Error())
					return
				}
				bu := buf[:int(sl.DLen())]
				f.Seek(int64(sl.DPos()), os.SEEK_SET)
				f.Read(bu)
				f.Close()
				bld := decomp_block(sl.Flags(), bu)
				// cheap pre-filter: raw pattern of the ord envelope (OP_FALSE OP_IF "ord")
				if !bytes.Contains(bld, []byte{0x00, 0x63, 0x03, 0x6f, 0x72, 0x64}) {
					continue
				}
				bl, er := btc.NewBlock(bld)
				if er != nil {
					println(er.Error())
					return
				}
				bl.BuildTxList()

				tot_txs += uint(bl.TxCount)
				tot_siz += uint(len(bl.Raw))
				tot_wht += bl.BlockWeight
				tot_otxs += bl.OrbTxCnt
				tot_osiz += bl.OrbTxSize
				tot_owht += bl.OrbTxWeight

				if !fl_ox {
					// summary only - print per-block ordinal share and move on
					fmt.Printf("In block #%d ordinals took %2d%% of txs (%4d), %2d%% of Size (%7d) and %2d%% of Weight (%7d)\n",
						sl.Height(), 100*bl.OrbTxCnt/uint(bl.TxCount), bl.TxCount, 100*bl.OrbTxSize/uint(len(bl.Raw)), len(bl.Raw),
						100*bl.OrbTxWeight/bl.BlockWeight, bl.BlockWeight)
					continue
				}

				for _, tx := range bl.Txs {
					if yes, sws := tx.ContainsOrdFile(false); yes {
						if true {
							for idx, sw := range sws {
								// witness item must start with a 32-byte push (0x20) and carry "ord" at [37:40]
								if len(sw) > 39 && sw[0] == 0x20 && sw[37] == 0x6f && sw[38] == 0x72 && sw[39] == 0x64 {
									typ, data, er := ExtractOrdFile(sw)
									if er != nil {
										println(er.Error())
										println(hex.EncodeToString(sw))
										println("exiting...")
										return
									}
									//println("block", sl.Height(), "has tx", tx.Hash.String(), "len", string(typ), "-", len(data), "bytes")
									if true {
										// derive the file extension from the mime type's subtype (e.g. "image/png" -> "png")
										ext := typ
										tps := strings.SplitN(string(typ), "/", 2)
										if len(tps) == 2 {
											ext = tps[1]
										}
										// NOTE(review): 0700 marks a data file executable; 0600 would be more conventional
										ioutil.WriteFile(fmt.Sprint("ord/", sl.Height(), "-", tx.Hash.String(), "-", idx, ".", ext), data, 0700)
									}
									ord_cnt++
								}
							}
						} else {
							ioutil.WriteFile(fmt.Sprint("ord/", sl.Height(), "-", tx.Hash.String(), ".tx"), tx.Raw, 0700)
							ord_cnt++
						}
					}
				}
			}
		}
		if fl_ox {
			fmt.Println(ord_cnt, "ord files found")
		} else {
			// NOTE(review): "Averagle" is a typo for "Average" (runtime string - fix separately);
			// also divides by zero if no block in the range contained the ord pattern
			fmt.Printf("Averagle blocks occupation: %d%% txs, %d%% bytes, %d%% weight\n",
				100*tot_otxs/tot_txs, 100*tot_osiz/tot_siz, 100*tot_owht/tot_wht)
		}
		return
	}

	// default mode (no action flag matched): walk the whole index once and print
	// summary statistics about heights, compression methods and on-disk availability.
	var minbh, maxbh, valididx, validlen, blockondisk, minbhondisk uint32
	var tot_len, tot_size, tot_size_bad uint64
	var snap_cnt, gzip_cnt, uncompr_cnt int
	// seed min/max from the first record (assumes dat holds at least one 136-byte record)
	minbh = binary.LittleEndian.Uint32(dat[36:40])
	maxbh = minbh
	minbhondisk = 0xffffffff
	for off := 0; off < len(dat); off += 136 {
		sl := new_sl(dat[off : off+136])

		// flags bit(2) = compressed, bit(3) = snappy (else gzip) - see file header comment
		fl := sl.Flags()
		if (fl & 4) != 0 {
			if (fl & 8) != 0 {
				snap_cnt++
			} else {
				gzip_cnt++
			}
		} else {
			uncompr_cnt++
		}

		dlen := sl.DLen()
		didx := sl.DatIdx()

		tot_len += uint64(dlen)

		s := sl.Size()
		if s == 0 {
			// uncompressed-length field not filled in for this record
			tot_size_bad++
		} else {
			tot_size += uint64(s)
		}

		bh := sl.Height()
		if bh > maxbh {
			maxbh = bh
		} else if bh < minbh {
			minbh = bh
		}
		if didx != 0xffffffff {
			valididx++
		}
		if dlen != 0 {
			validlen++
		}
		// count blocks whose dat file actually exists and is long enough to hold the data
		if didx != 0xffffffff && dlen != 0 {
			if fi, er := os.Stat(dat_fname(didx)); er == nil && fi.Size() >= int64(sl.DPos())+int64(dlen) {
				blockondisk++
				if bh < minbhondisk {
					minbhondisk = bh
				}
			}
		}

	}
	fmt.Println("Block heights from", minbh, "to", maxbh)
	fmt.Println(blockondisk, "blocks stored on disk, from height", minbhondisk)
	fmt.Println(uncompr_cnt, "uncompressed, ", gzip_cnt, "compressed with gzip and ", snap_cnt, "with snappy")
	fmt.Println("Number of records with valid length:", validlen)
	fmt.Println("Number of records with valid data file:", valididx)
	fmt.Println("Total size of all compressed blocks:", tot_len)
	fmt.Println("Total size of all uncompressed blocks:", tot_size)
	if tot_size_bad > 0 {
		fmt.Println("WARNING: Total size did not account for", tot_size_bad, "blocks")
	}
}