github.com/ConsenSys/Quorum@v20.10.0+incompatible/cmd/geth/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"fmt"
    22  	"io"
    23  	"os"
    24  	"path/filepath"
    25  	"runtime"
    26  	"strconv"
    27  	"sync/atomic"
    28  	"time"
    29  
    30  	"github.com/ethereum/go-ethereum/cmd/utils"
    31  	"github.com/ethereum/go-ethereum/common"
    32  	"github.com/ethereum/go-ethereum/console"
    33  	"github.com/ethereum/go-ethereum/core"
    34  	"github.com/ethereum/go-ethereum/core/rawdb"
    35  	"github.com/ethereum/go-ethereum/core/state"
    36  	"github.com/ethereum/go-ethereum/core/types"
    37  	"github.com/ethereum/go-ethereum/eth/downloader"
    38  	"github.com/ethereum/go-ethereum/event"
    39  	"github.com/ethereum/go-ethereum/log"
    40  	"github.com/ethereum/go-ethereum/trie"
    41  	"gopkg.in/urfave/cli.v1"
    42  )
    43  
var (
	// initCommand bootstraps a new network by writing the genesis block
	// described by a JSON file into the node's databases.
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// importCommand imports one or more RLP-encoded block files into the chain.
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes the blockchain (optionally a block range) to a file.
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand loads hash preimages from an RLP stream.
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	// exportPreimagesCommand dumps the preimage database to an RLP stream.
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command export hash preimages to an RLP encoded stream`,
	}
	// copydbCommand clones a chain by syncing from a local chaindata folder.
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from`,
	}
	// removedbCommand deletes the blockchain and state databases after confirmation.
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	// dumpCommand prints the state of specific blocks (by hash or number).
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
	// inspectCommand reports per-category storage usage of the database.
	inspectCommand = cli.Command{
		Action:    utils.MigrateFlags(inspect),
		Name:      "inspect",
		Usage:     "Inspect the storage size for each type of data in the database",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.AncientFlag,
			utils.CacheFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
	}
)
   193  
   194  // In the regular Genesis / ChainConfig struct, due to the way go deserializes
   195  // json, IsQuorum defaults to false (when not specified). Here we specify it as
   196  // a pointer so we can make the distinction and default unspecified to true.
   197  func getIsQuorum(file io.Reader) bool {
   198  	altGenesis := new(struct {
   199  		Config *struct {
   200  			IsQuorum *bool `json:"isQuorum"`
   201  		} `json:"config"`
   202  	})
   203  
   204  	if err := json.NewDecoder(file).Decode(altGenesis); err != nil {
   205  		utils.Fatalf("invalid genesis file: %v", err)
   206  	}
   207  
   208  	// unspecified defaults to true
   209  	return altGenesis.Config.IsQuorum == nil || *altGenesis.Config.IsQuorum
   210  }
   211  
   212  // initGenesis will initialise the given JSON format genesis file and writes it as
   213  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   214  func initGenesis(ctx *cli.Context) error {
   215  	// Make sure we have a valid genesis JSON
   216  	genesisPath := ctx.Args().First()
   217  	if len(genesisPath) == 0 {
   218  		utils.Fatalf("Must supply path to genesis JSON file")
   219  	}
   220  	file, err := os.Open(genesisPath)
   221  	if err != nil {
   222  		utils.Fatalf("Failed to read genesis file: %v", err)
   223  	}
   224  	defer file.Close()
   225  
   226  	genesis := new(core.Genesis)
   227  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   228  		utils.Fatalf("invalid genesis file: %v", err)
   229  	}
   230  
   231  	file.Seek(0, 0)
   232  	genesis.Config.IsQuorum = getIsQuorum(file)
   233  
   234  	// check the data given as a part of newMaxConfigData to ensure that
   235  	// its in expected order
   236  	err = genesis.Config.CheckMaxCodeConfigData()
   237  	if err != nil {
   238  		utils.Fatalf("maxCodeSize data invalid: %v", err)
   239  	}
   240  
   241  	// Open an initialise both full and light databases
   242  	stack := makeFullNode(ctx)
   243  	defer stack.Close()
   244  
   245  	for _, name := range []string{"chaindata", "lightchaindata"} {
   246  		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
   247  		if err != nil {
   248  			utils.Fatalf("Failed to open database: %v", err)
   249  		}
   250  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   251  		if err != nil {
   252  			utils.Fatalf("Failed to write genesis block: %v", err)
   253  		}
   254  		chaindb.Close()
   255  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   256  	}
   257  	return nil
   258  }
   259  
// importChain imports one or more RLP-encoded block files into the node's
// blockchain, then prints database and memory statistics and (unless
// disabled) compacts the database to measure post-import disk usage.
//
// With a single file argument an import error is logged and the command
// continues to the stats phase; with several files, each failure is logged
// and the remaining files are still processed.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack, true)
	defer db.Close()

	// Start periodically gathering memory profiles
	// NOTE(review): this sampler goroutine has no stop signal and runs for
	// the remaining process lifetime; acceptable for a one-shot CLI command.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		// Multiple files: best effort, keep going on individual failures.
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	stats, err := db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)
	return nil
}
   349  
   350  func exportChain(ctx *cli.Context) error {
   351  	if len(ctx.Args()) < 1 {
   352  		utils.Fatalf("This command requires an argument.")
   353  	}
   354  	stack := makeFullNode(ctx)
   355  	defer stack.Close()
   356  
   357  	chain, _ := utils.MakeChain(ctx, stack, true)
   358  	start := time.Now()
   359  
   360  	var err error
   361  	fp := ctx.Args().First()
   362  	if len(ctx.Args()) < 3 {
   363  		err = utils.ExportChain(chain, fp)
   364  	} else {
   365  		// This can be improved to allow for numbers larger than 9223372036854775807
   366  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   367  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   368  		if ferr != nil || lerr != nil {
   369  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   370  		}
   371  		if first < 0 || last < 0 {
   372  			utils.Fatalf("Export error: block number must be greater than 0\n")
   373  		}
   374  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   375  	}
   376  
   377  	if err != nil {
   378  		utils.Fatalf("Export error: %v\n", err)
   379  	}
   380  	fmt.Printf("Export done in %v\n", time.Since(start))
   381  	return nil
   382  }
   383  
   384  // importPreimages imports preimage data from the specified file.
   385  func importPreimages(ctx *cli.Context) error {
   386  	if len(ctx.Args()) < 1 {
   387  		utils.Fatalf("This command requires an argument.")
   388  	}
   389  	stack := makeFullNode(ctx)
   390  	defer stack.Close()
   391  
   392  	db := utils.MakeChainDatabase(ctx, stack)
   393  	start := time.Now()
   394  
   395  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   396  		utils.Fatalf("Import error: %v\n", err)
   397  	}
   398  	fmt.Printf("Import done in %v\n", time.Since(start))
   399  	return nil
   400  }
   401  
   402  // exportPreimages dumps the preimage data to specified json file in streaming way.
   403  func exportPreimages(ctx *cli.Context) error {
   404  	if len(ctx.Args()) < 1 {
   405  		utils.Fatalf("This command requires an argument.")
   406  	}
   407  	stack := makeFullNode(ctx)
   408  	defer stack.Close()
   409  
   410  	db := utils.MakeChainDatabase(ctx, stack)
   411  	start := time.Now()
   412  
   413  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   414  		utils.Fatalf("Export error: %v\n", err)
   415  	}
   416  	fmt.Printf("Export done in %v\n", time.Since(start))
   417  	return nil
   418  }
   419  
// copyDb builds a local chain by "syncing" from an existing on-disk chaindata
// directory: the source database is wrapped in a fake downloader peer and the
// regular downloader machinery replays it into the running node's database.
//
// Args: <sourceChaindataDir> <sourceAncientDir>.
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) < 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	if len(ctx.Args()) < 2 {
		utils.Fatalf("Source ancient chain directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack, false)
	syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)

	// Fast sync gets a trie sync bloom sized to half the cache allowance.
	var syncBloom *trie.SyncBloom
	if syncMode == downloader.FastSync {
		syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb)
	}
	dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "")
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	// 63 is the eth protocol version the fake peer advertises.
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil {
		return err
	}
	// Poll until the downloader reports the sync as finished.
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
	return nil
}
   475  
   476  func removeDB(ctx *cli.Context) error {
   477  	stack, config := makeConfigNode(ctx)
   478  
   479  	// Remove the full node state database
   480  	path := stack.ResolvePath("chaindata")
   481  	if common.FileExist(path) {
   482  		confirmAndRemoveDB(path, "full node state database")
   483  	} else {
   484  		log.Info("Full node state database missing", "path", path)
   485  	}
   486  	// Remove the full node ancient database
   487  	path = config.Eth.DatabaseFreezer
   488  	switch {
   489  	case path == "":
   490  		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
   491  	case !filepath.IsAbs(path):
   492  		path = config.Node.ResolvePath(path)
   493  	}
   494  	if common.FileExist(path) {
   495  		confirmAndRemoveDB(path, "full node ancient database")
   496  	} else {
   497  		log.Info("Full node ancient database missing", "path", path)
   498  	}
   499  	// Remove the light node database
   500  	path = stack.ResolvePath("lightchaindata")
   501  	if common.FileExist(path) {
   502  		confirmAndRemoveDB(path, "light node database")
   503  	} else {
   504  		log.Info("Light node database missing", "path", path)
   505  	}
   506  	return nil
   507  }
   508  
   509  // confirmAndRemoveDB prompts the user for a last confirmation and removes the
   510  // folder if accepted.
   511  func confirmAndRemoveDB(database string, kind string) {
   512  	confirm, err := console.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
   513  	switch {
   514  	case err != nil:
   515  		utils.Fatalf("%v", err)
   516  	case !confirm:
   517  		log.Info("Database deletion skipped", "path", database)
   518  	default:
   519  		start := time.Now()
   520  		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
   521  			// If we're at the top level folder, recurse into
   522  			if path == database {
   523  				return nil
   524  			}
   525  			// Delete all the files, but not subfolders
   526  			if !info.IsDir() {
   527  				os.Remove(path)
   528  				return nil
   529  			}
   530  			return filepath.SkipDir
   531  		})
   532  		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
   533  	}
   534  }
   535  
   536  func dump(ctx *cli.Context) error {
   537  	stack := makeFullNode(ctx)
   538  	defer stack.Close()
   539  
   540  	chain, chainDb := utils.MakeChain(ctx, stack, false)
   541  	defer chainDb.Close()
   542  	for _, arg := range ctx.Args() {
   543  		var block *types.Block
   544  		if hashish(arg) {
   545  			block = chain.GetBlockByHash(common.HexToHash(arg))
   546  		} else {
   547  			num, _ := strconv.Atoi(arg)
   548  			block = chain.GetBlockByNumber(uint64(num))
   549  		}
   550  		if block == nil {
   551  			fmt.Println("{}")
   552  			utils.Fatalf("block not found")
   553  		} else {
   554  			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
   555  			if err != nil {
   556  				utils.Fatalf("could not create new state: %v", err)
   557  			}
   558  			excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
   559  			excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
   560  			includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
   561  			if ctx.Bool(utils.IterativeOutputFlag.Name) {
   562  				state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
   563  			} else {
   564  				if includeMissing {
   565  					fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
   566  						" otherwise the accounts will overwrite each other in the resulting mapping.")
   567  				}
   568  				fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
   569  			}
   570  		}
   571  	}
   572  	return nil
   573  }
   574  
   575  func inspect(ctx *cli.Context) error {
   576  	node, _ := makeConfigNode(ctx)
   577  	defer node.Close()
   578  
   579  	_, chainDb := utils.MakeChain(ctx, node, false)
   580  	defer chainDb.Close()
   581  
   582  	return rawdb.InspectDatabase(chainDb)
   583  }
   584  
   585  // hashish returns true for strings that look like hashes.
   586  func hashish(x string) bool {
   587  	_, err := strconv.Atoi(x)
   588  	return err != nil
   589  }