github.com/ethereumproject/go-ethereum@v5.5.2+incompatible/accounts/cachedb.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package accounts

import (
	"bufio"
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"sync"
	"time"

	"github.com/boltdb/bolt"
	"github.com/ethereumproject/go-ethereum/common"
	"github.com/ethereumproject/go-ethereum/logger"
	"github.com/ethereumproject/go-ethereum/logger/glog"
	"github.com/mailru/easyjson"
)

var addrBucketName = []byte("byAddr")
var fileBucketName = []byte("byFile")
var statsBucketName = []byte("stats")
var ErrCacheDBNoUpdateStamp = errors.New("cachedb has no updated timestamp; expected for newborn dbs.")

// cacheDB is a persistent (bolt-backed) index of all accounts in the keystore.
type cacheDB struct {
	keydir   string
	watcher  *watcher
	mu       sync.Mutex
	throttle *time.Timer
	db       *bolt.DB
}

// newCacheDB opens (or creates) the bolt index at keydir/accounts.db and
// ensures the byAddr, byFile and stats buckets exist.
func newCacheDB(keydir string) *cacheDB {
	if e := os.MkdirAll(keydir, os.ModePerm); e != nil {
		panic(e)
	}

	dbpath := filepath.Join(keydir, "accounts.db")
	bdb, e := bolt.Open(dbpath, 0600, nil) // TODO configure more?
	if e != nil {
		panic(e)
	}

	cdb := &cacheDB{
		db: bdb,
	}
	cdb.keydir = keydir

	if e := cdb.db.Update(func(tx *bolt.Tx) error {
		if _, e := tx.CreateBucketIfNotExists(addrBucketName); e != nil {
			return e
		}
		if _, e := tx.CreateBucketIfNotExists(fileBucketName); e != nil {
			return e
		}
		if _, e := tx.CreateBucketIfNotExists(statsBucketName); e != nil {
			return e
		}
		return nil
	}); e != nil {
		panic(e)
	}

	return cdb
}

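// Illustrative sketch (not part of the original file): how code in this
// package might exercise the cacheDB lifecycle -- open the index under a
// keystore directory, add an account, list the cache, and close it. The
// exampleCacheDBUsage name, directory and account values are hypothetical.
func exampleCacheDBUsage(keydir string) {
	cdb := newCacheDB(keydir) // opens or creates keydir/accounts.db
	defer cdb.close()

	cdb.add(Account{
		Address: common.HexToAddress("0x0123456789abcdef0123456789abcdef01234567"),
		File:    "UTC--2016-01-01T00-00-00.000000000Z--0123456789abcdef0123456789abcdef01234567",
	})

	for _, a := range cdb.accounts() {
		fmt.Printf("cached: %s -> %s\n", a.Address.Hex(), a.File)
	}
}
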
// Getter functions to implement caching interface.
func (cdb *cacheDB) muLock() {
	cdb.mu.Lock()
}

func (cdb *cacheDB) muUnlock() {
	cdb.mu.Unlock()
}

func (cdb *cacheDB) getKeydir() string {
	return cdb.keydir
}

func (cdb *cacheDB) getWatcher() *watcher {
	return cdb.watcher
}

func (cdb *cacheDB) getThrottle() *time.Timer {
	return cdb.throttle
}

func (cdb *cacheDB) maybeReload() {
	// do nothing (implements caching interface)
}

func (cdb *cacheDB) reload() {
	// do nothing (implements caching interface)
}

// accounts returns all accounts stored _byFile_, which contains (and may
// exceed) the byAddr content, because it can hold duplicate address/key pairs
// (given duplicate files).
func (cdb *cacheDB) accounts() []Account {
	var as []Account
	if e := cdb.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(fileBucketName)
		c := b.Cursor()

		for k, v := c.First(); k != nil; k, v = c.Next() {
			a := bytesToAccount(v)
			a.File = string(k)
			as = append(as, a)
		}

		return nil
	}); e != nil {
		panic(e)
	}

	sort.Sort(accountsByFile(as)) // this is important for getting AccountByIndex

	cpy := make([]Account, len(as))
	copy(cpy, as)

	return cpy
}

// Note, again, that this returns a _slice_ of accounts, since one address can
// be backed by more than one key file.
func (cdb *cacheDB) getCachedAccountsByAddress(addr common.Address) (accounts []Account, err error) {
	err = cdb.db.View(func(tx *bolt.Tx) error {
		c := tx.Bucket(addrBucketName).Cursor()

		prefix := []byte(addr.Hex())
		for k, _ := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, _ = c.Next() {
			accounts = append(accounts, Account{Address: addr, File: string(bytes.Replace(k, prefix, []byte(""), 1))})
		}
		return nil
	})
	if err == nil && (len(accounts) == 0) {
		return accounts, ErrNoMatch
	}
	return accounts, err
}

// ... and this returns an Account.
func (cdb *cacheDB) getCachedAccountByFile(file string) (account Account, err error) {
	if file == "" {
		return Account{}, ErrNoMatch
	}
	err = cdb.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(fileBucketName)
		if v := b.Get([]byte(file)); v != nil {
			account = bytesToAccount(v)
		}
		return nil
	})
	if err == nil && (account == Account{}) {
		return account, ErrNoMatch
	}
	return account, err
}

func (cdb *cacheDB) hasAddress(addr common.Address) bool {
	as, e := cdb.getCachedAccountsByAddress(addr)
	return e == nil && len(as) > 0
}

// add assumes that if an account is not cached by file, it is not listed by
// address either; so when (and only when) it adds an account to the cache, it
// adds it to both buckets.
func (cdb *cacheDB) add(newAccount Account) {
	defer cdb.setLastUpdated()
	if e := cdb.db.Update(func(tx *bolt.Tx) error {
		newAccount.File = filepath.Base(newAccount.File)
		if newAccount.File != "" {
			bf := tx.Bucket(fileBucketName)
			if e := bf.Put([]byte(newAccount.File), accountToBytes(newAccount)); e != nil {
				return e
			}
		}
		if (newAccount.Address != common.Address{}) {
			b := tx.Bucket(addrBucketName)
			return b.Put([]byte(newAccount.Address.Hex()+newAccount.File), []byte(time.Now().String()))
		}
		return nil
	}); e != nil {
		glog.V(logger.Error).Infof("failed to add to cache: %v \n%v", e, newAccount.File)
	}
}

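// Illustrative sketch (not part of the original file): the byAddr bucket keys
// are the concatenation addr.Hex()+fileName (the value is just a timestamp),
// while byFile keys are bare file names whose values hold the serialized
// account. The hypothetical dumpAddrIndex helper below walks the byAddr
// bucket and splits each key back into its two parts.
func dumpAddrIndex(cdb *cacheDB) error {
	return cdb.db.View(func(tx *bolt.Tx) error {
		return tx.Bucket(addrBucketName).ForEach(func(k, v []byte) error {
			// Keys look like "0x<40 hex chars><file name>"; the hex-encoded
			// address part has a fixed length, so the file name is the rest.
			const hexAddrLen = 42 // len("0x") + 40 hex characters
			if len(k) < hexAddrLen {
				return fmt.Errorf("unexpected byAddr key: %q", k)
			}
			fmt.Printf("addr=%s file=%s stored=%s\n", k[:hexAddrLen], k[hexAddrLen:], v)
			return nil
		})
	})
}
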
// note: removed needs to be unique here (i.e. both File and Address must be set).
func (cdb *cacheDB) delete(removed Account) {
	defer cdb.setLastUpdated()
	if e := cdb.db.Update(func(tx *bolt.Tx) error {
		removed.File = filepath.Base(removed.File)

		b := tx.Bucket(fileBucketName)
		if e := b.Delete([]byte(removed.File)); e != nil {
			return e
		}

		ba := tx.Bucket(addrBucketName)
		if e := ba.Delete([]byte(removed.Address.Hex() + removed.File)); e != nil {
			return e
		}
		return nil
	}); e != nil {
		glog.V(logger.Error).Infof("failed to delete from cache: %v \n%v", e, removed.File)
	}
}

// find returns the cached account for address if there is a unique match.
// The exact matching rules are explained by the documentation of Account.
// Callers must hold cdb.mu.
func (cdb *cacheDB) find(a Account) (Account, error) {
	var acc Account
	var matches []Account
	var e error

	if a.File != "" {
		acc, e = cdb.getCachedAccountByFile(a.File)
		if e == nil && (acc != Account{}) {
			return acc, e
		}
		// no other possible way
		if a.Address.IsEmpty() {
			return Account{}, ErrNoMatch
		}
	}

	if (a.Address != common.Address{}) {
		matches, e = cdb.getCachedAccountsByAddress(a.Address)
	}

	switch len(matches) {
	case 1:
		return matches[0], e
	case 0:
		return Account{}, ErrNoMatch
	default:
		err := &AmbiguousAddrError{Addr: a.Address, Matches: make([]Account, len(matches))}
		copy(err.Matches, matches)
		return Account{}, err
	}
}

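// Illustrative sketch (not part of the original file): resolving an address
// that may be backed by more than one key file. Duplicate files for the same
// address surface from find as an *AmbiguousAddrError; picking the first
// match here is an arbitrary, hypothetical policy, as is the
// exampleResolveAddress name.
func exampleResolveAddress(cdb *cacheDB, addr common.Address) (Account, error) {
	cdb.muLock()
	a, err := cdb.find(Account{Address: addr})
	cdb.muUnlock()
	if err == nil {
		return a, nil
	}
	if ambig, ok := err.(*AmbiguousAddrError); ok && len(ambig.Matches) > 0 {
		return ambig.Matches[0], nil
	}
	return Account{}, err
}
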
func (cdb *cacheDB) close() {
	cdb.mu.Lock()
	cdb.db.Close()
	cdb.mu.Unlock()
}

func (cdb *cacheDB) setLastUpdated() error {
	return cdb.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(statsBucketName)
		return b.Put([]byte("lastUpdated"), []byte(time.Now().Add(minReloadInterval).String())) // pad by minReloadInterval so the stamp is not a near-tie with the directory mod time
	})
}

func (cdb *cacheDB) getLastUpdated() (t time.Time, err error) {
	e := cdb.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(statsBucketName)
		v := b.Get([]byte("lastUpdated"))
		if v == nil {
			// No stamp yet; report a far-past time and a sentinel error.
			t, err = time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", "1900-01-02 15:04:05.999999999 -0700 MST")
			return ErrCacheDBNoUpdateStamp
		}
		// The layout mirrors time.Time's default String() format used by setLastUpdated.
		pt, e := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", string(v))
		if e != nil {
			return e
		}
		t = pt
		return nil
	})
	return t, e
}

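// Illustrative sketch (not part of the original file): the stamp round-trips
// through time.Time's default String() shape, so the layout in getLastUpdated
// has to match it exactly. The stored value below is made up.
func exampleStampRoundTrip() (time.Time, error) {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	const stored = "2017-03-04 10:20:30.123456789 +0000 UTC" // what a stamp looks like on disk
	return time.Parse(layout, stored)
}
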
// setBatchAccounts sets many accounts in a single db tx.
// It saves a lot of time in disk writes.
func (cdb *cacheDB) setBatchAccounts(accs []Account) (errs []error) {
	if len(accs) == 0 {
		return nil
	}
	defer cdb.setLastUpdated()

	tx, err := cdb.db.Begin(true)
	if err != nil {
		return append(errs, err)
	}

	ba := tx.Bucket(addrBucketName)
	bf := tx.Bucket(fileBucketName)

	for _, a := range accs {
		// Put in byAddr bucket.
		if e := ba.Put([]byte(a.Address.Hex()+a.File), []byte(time.Now().String())); e != nil {
			errs = append(errs, e)
		}
		// Put in byFile bucket.
		if e := bf.Put([]byte(a.File), accountToBytes(a)); e != nil {
			errs = append(errs, e)
		}
	}

	if len(errs) == 0 {
		// Close tx.
		if err := tx.Commit(); err != nil {
			return append(errs, err)
		}
	} else {
		tx.Rollback()
	}
	return errs
}

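// Illustrative sketch (not part of the original file): importing a large
// slice of accounts through one write transaction rather than one add call
// per account. The exampleBatchImport name is hypothetical.
func exampleBatchImport(cdb *cacheDB, found []Account) {
	if errs := cdb.setBatchAccounts(found); len(errs) != 0 {
		for _, e := range errs {
			glog.V(logger.Error).Infof("batch import: %v", e)
		}
	}
}
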
// Syncfs2db synchronises an existing cachedb with a corresponding fs.
func (cdb *cacheDB) Syncfs2db(lastUpdated time.Time) (errs []error) {

	// Check if directory was modified. Makes function somewhat idempotent...
	//di, de := os.Stat(cdb.keydir)
	//if de != nil {
	//	return append(errs, de)
	//}
	// ... but I don't trust/know when directory stamps get modified (ie for tests).
	//dbLastMod, lue := cdb.getLastUpdated()
	//if lue != nil {
	//	errs = append(errs, lue)
	//} else {
	//	directoryLastMod := di.ModTime()
	//	if dbLastMod.After(directoryLastMod) {
	//		glog.V(logger.Info).Info("Directory has not been modified since DB was updated. Not syncing.")
	//		return errs
	//	}
	//}

	defer cdb.setLastUpdated()

	var (
		accounts []Account
	)

	// SYNC: DB --> FS.

	// Remove all cache entries.
	e := cdb.db.Update(func(tx *bolt.Tx) error {
		tx.DeleteBucket(addrBucketName)
		tx.DeleteBucket(fileBucketName)
		if _, e := tx.CreateBucketIfNotExists(addrBucketName); e != nil {
			return e
		}
		if _, e := tx.CreateBucketIfNotExists(fileBucketName); e != nil {
			return e
		}

		return nil
	})
	if e != nil {
		errs = append(errs, e)
	}

	files, err := ioutil.ReadDir(cdb.keydir)
	if err != nil {
		return append(errs, err)
	}
	numFiles := len(files)

	glog.V(logger.Debug).Infof("Syncing index db: %v files", numFiles)

	waitUp := &sync.WaitGroup{}
	achan := make(chan Account)
	echan := make(chan error)
	done := make(chan bool, 1)

	// SYNC: FS --> DB.

	// Handle receiving errors/accounts.
	go func(wg *sync.WaitGroup, achan chan Account, echan chan error) {

		for j := 0; j < len(files); j++ {
			select {
			case a := <-achan:
				if (a == Account{}) {
					continue
				}
				accounts = append(accounts, a)
				if len(accounts) == 20000 {
					if es := cdb.setBatchAccounts(accounts); len(es) != 0 {
						for _, ee := range es {
							if ee != nil {
								errs = append(errs, ee)
							}
						}
					}
					accounts = nil
				}
			case e := <-echan:
				if e != nil {
					errs = append(errs, e)
				}
			}
		}

		waitUp.Wait()
		close(achan)
		close(echan)

		if es := cdb.setBatchAccounts(accounts); len(es) != 0 {
			for _, ee := range es {
				if ee != nil {
					errs = append(errs, ee)
				}
			}
		}

		done <- true
	}(waitUp, achan, echan)

	// Iterate files.
	for i, fi := range files {

		// fi.Name() is used for storing the file in the cache db.
		// This assumes that the keystore/ dir is not designed to walk recursively.
		// See testdata/keystore/foo/UTC-afd..... compared with cacheTestAccounts for
		// test proof of this assumption.
		path := filepath.Join(cdb.keydir, fi.Name())
		waitUp.Add(1)
		// TODO: inform go routine allowance based on memory statistics
		if runtime.NumGoroutine() > runtime.NumCPU()*300 {
			processKeyFile(waitUp, path, fi, i, numFiles, achan, echan)
		} else {
			go processKeyFile(waitUp, path, fi, i, numFiles, achan, echan)
		}
	}

	<-done

	accounts = nil

	for _, e := range errs {
		if e != nil {
			glog.V(logger.Debug).Infof("Error: %v", e)
		}
	}

	return errs
}

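// Illustrative sketch (not part of the original file): the scheduling guard
// used by Syncfs2db in isolation -- fan work out to goroutines, but fall back
// to processing inline once the goroutine count grows too large. The
// exampleBoundedFanOut name and the processItem callback are hypothetical.
func exampleBoundedFanOut(items []string, processItem func(string)) {
	wg := &sync.WaitGroup{}
	for _, it := range items {
		wg.Add(1)
		work := func(s string) {
			defer wg.Done()
			processItem(s)
		}
		// Same idea as in Syncfs2db: when too many goroutines are already
		// live, do the work synchronously instead of spawning another one.
		if runtime.NumGoroutine() > runtime.NumCPU()*300 {
			work(it)
		} else {
			go work(it)
		}
	}
	wg.Wait()
}
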
// It is important that this sends exactly one value on either the account
// channel OR the error channel.
func processKeyFile(wg *sync.WaitGroup, path string, fi os.FileInfo, i int, numFiles int, aChan chan Account, errs chan error) {
	defer wg.Done()
	if skipKeyFile(fi) {
		glog.V(logger.Debug).Infof("(%v/%v) Ignoring file %s", i, numFiles, fi.Name())
		errs <- nil
		return
	}

	glog.V(logger.Debug).Infof("(%v/%v) Adding key file to db: %v", i, numFiles, fi.Name())

	keyJSON := struct {
		Address common.Address `json:"address"`
	}{}

	buf := new(bufio.Reader)
	fd, err := os.Open(path)
	if err != nil {
		errs <- err
		return
	}
	buf.Reset(fd)
	// Parse the address.
	keyJSON.Address = common.Address{}
	decodeErr := json.NewDecoder(buf).Decode(&keyJSON)
	fd.Close()

	web3JSON, err := ioutil.ReadFile(path)
	if err != nil {
		errs <- err
		return
	}

	switch {
	case decodeErr != nil:
		glog.V(logger.Debug).Infof("(%v/%v) can't decode key %s: %v", i, numFiles, path, decodeErr)
		errs <- decodeErr
	case keyJSON.Address.IsEmpty():
		glog.V(logger.Debug).Infof("(%v/%v) can't decode key %s: missing or zero address", i, numFiles, path)
		errs <- fmt.Errorf("(%v/%v) can't decode key %s: missing or zero address", i, numFiles, path)
	default:
		aChan <- Account{Address: keyJSON.Address, File: fi.Name(), EncryptedKey: string(web3JSON)}
	}
}

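// Illustrative sketch (not part of the original file): processKeyFile only
// needs the address field of a key file, so it decodes into a one-field
// struct and ignores the rest of the (much larger) encrypted JSON. The sample
// document and address below are made up.
func exampleDecodeKeyFileAddress() (common.Address, error) {
	sample := []byte(`{"address":"0123456789abcdef0123456789abcdef01234567","version":3}`)

	keyJSON := struct {
		Address common.Address `json:"address"`
	}{}
	if err := json.Unmarshal(sample, &keyJSON); err != nil {
		return common.Address{}, err
	}
	return keyJSON.Address, nil
}
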
func bytesToAccount(bs []byte) Account {
	var aux AccountJSON
	if e := easyjson.Unmarshal(bs, &aux); e != nil {
		panic(e)
		//return Account{}
	}
	return Account{
		Address:      common.HexToAddress(aux.Address),
		EncryptedKey: aux.EncryptedKey,
		File:         aux.File,
	}
}

func accountToBytes(account Account) []byte {
	aux := &AccountJSON{
		Address:      account.Address.Hex(),
		EncryptedKey: account.EncryptedKey,
		File:         account.File,
	}
	b, e := easyjson.Marshal(aux)
	if e != nil {
		panic(e)
		// return nil
	}
	return b
}
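
// Illustrative sketch (not part of the original file): the byFile bucket
// stores accounts through the accountToBytes/bytesToAccount pair, so a round
// trip should reproduce the original value (the address is normalised via
// Hex/HexToAddress). The example account is made up.
func exampleAccountRoundTrip() bool {
	in := Account{
		Address:      common.HexToAddress("0x0123456789abcdef0123456789abcdef01234567"),
		File:         "UTC--2016-01-01T00-00-00.000000000Z--0123456789abcdef0123456789abcdef01234567",
		EncryptedKey: `{"address":"0123456789abcdef0123456789abcdef01234567"}`,
	}
	out := bytesToAccount(accountToBytes(in))
	return out == in
}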