github.com/daragao/go-ethereum@v1.8.14-0.20180809141559-45eaef243198/swarm/storage/ldbstore.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Disk storage layer for the package bzz.
// DbStore implements the ChunkStore interface and is used by the FileStore as
// persistent storage of chunks.
// It implements purging based on access count, allowing for external control of
// max capacity.

package storage

import (
	"archive/tar"
	"bytes"
	"context"
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"sort"
	"sync"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage/mock"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

const (
	gcArrayFreeRatio = 0.1
	maxGCitems       = 5000 // max number of items to be gc'd per call to collectGarbage()
)

// Single-byte key prefixes partition the database into namespaces: chunk
// index entries, legacy chunk data, global counters, chunk data and
// per-proximity-order bucket counters.
var (
	keyIndex       = byte(0)
	keyOldData     = byte(1)
	keyAccessCnt   = []byte{2}
	keyEntryCnt    = []byte{3}
	keyDataIdx     = []byte{4}
	keyData        = byte(6)
	keyDistanceCnt = byte(7)
)

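// gcItem caches the fields of an index entry that collectGarbage needs in
// order to rank and delete candidates: the data index, the access count used
// for ranking, the raw index key and the proximity order of the chunk.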
type gcItem struct {
	idx    uint64
	value  uint64
	idxKey []byte
	po     uint8
}

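// LDBStoreParams bundles the generic StoreParams with the database path and
// the proximity order function used to bin chunks by distance from the base key.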
type LDBStoreParams struct {
	*StoreParams
	Path string
	Po   func(Address) uint8
}

// NewLDBStoreParams constructs LDBStoreParams with the specified values.
func NewLDBStoreParams(storeparams *StoreParams, path string) *LDBStoreParams {
	return &LDBStoreParams{
		StoreParams: storeparams,
		Path:        path,
		Po:          func(k Address) (ret uint8) { return uint8(Proximity(storeparams.BaseKey[:], k[:])) },
	}
}

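// LDBStore is a LevelDB-backed implementation of the ChunkStore interface.
// Writes accumulate in a shared batch that the writeBatches goroutine flushes
// asynchronously; reads bump per-chunk access counters, which in turn drive
// garbage collection once the store exceeds its capacity.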
type LDBStore struct {
	db *LDBDatabase

	// this should be stored in db, accessed transactionally
	entryCnt  uint64 // number of items in the LevelDB
	accessCnt uint64 // ever-accumulating number increased every time we read/access an entry
	dataIdx   uint64 // similar to entryCnt, but we only increment it
	capacity  uint64
	bucketCnt []uint64

	hashfunc SwarmHasher
	po       func(Address) uint8

	batchC   chan bool
	batchesC chan struct{}
	batch    *leveldb.Batch
	lock     sync.RWMutex
	quit     chan struct{}

	// encodeDataFunc is used to bypass the default functionality of
	// DbStore with mock.NodeStore for testing purposes.
	encodeDataFunc func(chunk *Chunk) []byte
	// If getDataFunc is defined, it will be used for retrieving the chunk
	// data instead of the local LevelDB database.
	getDataFunc func(addr Address) (data []byte, err error)
}

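// NewLDBStore opens (or creates) the LevelDB database at params.Path, starts
// the batch writer goroutine and restores the entry, access and data index
// counters persisted by previous runs.
//
// A minimal usage sketch, assuming a *StoreParams value storeParams has been
// constructed elsewhere (the path below is only an example):
//
//	params := NewLDBStoreParams(storeParams, "/tmp/swarm-chunks")
//	store, err := NewLDBStore(params)
//	if err != nil {
//		// handle the error
//	}
//	defer store.Close()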
// TODO: Instead of passing the distance function, just pass the address from which distances are calculated
// to avoid the appearance of a pluggable distance metric and opportunities of bugs associated with providing
// a function different from the one that is actually used.
func NewLDBStore(params *LDBStoreParams) (s *LDBStore, err error) {
	s = new(LDBStore)
	s.hashfunc = params.Hash
	s.quit = make(chan struct{})

	s.batchC = make(chan bool)
	s.batchesC = make(chan struct{}, 1)
	go s.writeBatches()
	s.batch = new(leveldb.Batch)
	// associate encodeData with default functionality
	s.encodeDataFunc = encodeData

	s.db, err = NewLDBDatabase(params.Path)
	if err != nil {
		return nil, err
	}

	s.po = params.Po
	s.setCapacity(params.DbCapacity)

	// restore the persisted counters, resuming one past the stored values
	s.bucketCnt = make([]uint64, 0x100)
	for i := 0; i < 0x100; i++ {
		k := make([]byte, 2)
		k[0] = keyDistanceCnt
		k[1] = uint8(i)
		cnt, _ := s.db.Get(k)
		s.bucketCnt[i] = BytesToU64(cnt)
		s.bucketCnt[i]++
	}
	data, _ := s.db.Get(keyEntryCnt)
	s.entryCnt = BytesToU64(data)
	s.entryCnt++
	data, _ = s.db.Get(keyAccessCnt)
	s.accessCnt = BytesToU64(data)
	s.accessCnt++
	data, _ = s.db.Get(keyDataIdx)
	s.dataIdx = BytesToU64(data)
	s.dataIdx++

	return s, nil
}

// NewMockDbStore creates a new instance of DbStore with
// mockStore set to a provided value. If mockStore argument is nil,
// this function behaves exactly as NewLDBStore.
func NewMockDbStore(params *LDBStoreParams, mockStore *mock.NodeStore) (s *LDBStore, err error) {
	s, err = NewLDBStore(params)
	if err != nil {
		return nil, err
	}

	// replace put and get with mock store functionality
	if mockStore != nil {
		s.encodeDataFunc = newMockEncodeDataFunc(mockStore)
		s.getDataFunc = newMockGetDataFunc(mockStore)
	}
	return
}

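// dpaDBIndex is the RLP-encoded value stored under a keyIndex entry, mapping
// a chunk address to its data index and its last recorded access count.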
type dpaDBIndex struct {
	Idx    uint64
	Access uint64
}

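// BytesToU64 decodes data as a big-endian uint64, returning 0 when data is
// shorter than 8 bytes.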
func BytesToU64(data []byte) uint64 {
	if len(data) < 8 {
		return 0
	}
	return binary.BigEndian.Uint64(data)
}

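// U64ToBytes encodes val as an 8-byte big-endian slice, the inverse of BytesToU64.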
func U64ToBytes(val uint64) []byte {
	data := make([]byte, 8)
	binary.BigEndian.PutUint64(data, val)
	return data
}

func (s *LDBStore) updateIndexAccess(index *dpaDBIndex) {
	index.Access = s.accessCnt
}

func getIndexKey(hash Address) []byte {
	hashSize := len(hash)
	key := make([]byte, hashSize+1)
	key[0] = keyIndex
	copy(key[1:], hash[:])
	return key
}

func getOldDataKey(idx uint64) []byte {
	key := make([]byte, 9)
	key[0] = keyOldData
	binary.BigEndian.PutUint64(key[1:9], idx)

	return key
}

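// getDataKey builds the 10-byte data key: the keyData prefix, the proximity
// order of the chunk, then the big-endian encoded data index.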
func getDataKey(idx uint64, po uint8) []byte {
	key := make([]byte, 10)
	key[0] = keyData
	key[1] = po
	binary.BigEndian.PutUint64(key[2:], idx)

	return key
}

func encodeIndex(index *dpaDBIndex) []byte {
	data, _ := rlp.EncodeToBytes(index)
	return data
}

func encodeData(chunk *Chunk) []byte {
	// Always create a new underlying array for the returned byte slice.
	// The chunk.Addr array may otherwise be aliased by the returned slice
	// and changed later in the code or by LevelDB, corrupting the address.
	return append(append([]byte{}, chunk.Addr[:]...), chunk.SData...)
}

func decodeIndex(data []byte, index *dpaDBIndex) error {
	dec := rlp.NewStream(bytes.NewReader(data), 0)
	return dec.Decode(index)
}

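// decodeData strips the 32-byte address prefix written by encodeData and
// reads the chunk size from the first 8 bytes of the remaining payload.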
func decodeData(data []byte, chunk *Chunk) {
	chunk.SData = data[32:]
	chunk.Size = int64(binary.BigEndian.Uint64(data[32:40]))
}

func decodeOldData(data []byte, chunk *Chunk) {
	chunk.SData = data
	chunk.Size = int64(binary.BigEndian.Uint64(data[0:8]))
}

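// collectGarbage scans up to maxGCitems index entries, sorts them by access
// count and deletes the least recently accessed fraction given by ratio,
// removing both the index and the data entry of each victim.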
func (s *LDBStore) collectGarbage(ratio float32) {
	metrics.GetOrRegisterCounter("ldbstore.collectgarbage", nil).Inc(1)

	it := s.db.NewIterator()
	defer it.Release()

	garbage := []*gcItem{}
	gcnt := 0

	for ok := it.Seek([]byte{keyIndex}); ok && (gcnt < maxGCitems) && (uint64(gcnt) < s.entryCnt); ok = it.Next() {
		itkey := it.Key()

		if (itkey == nil) || (itkey[0] != keyIndex) {
			break
		}

		// it.Key() contents change on next call to it.Next(), so we must copy it
		key := make([]byte, len(it.Key()))
		copy(key, it.Key())

		val := it.Value()

		var index dpaDBIndex

		hash := key[1:]
		decodeIndex(val, &index)
		po := s.po(hash)

		gci := &gcItem{
			idxKey: key,
			idx:    index.Idx,
			value:  index.Access, // the smaller, the more likely to be gc'd. see sort comparator below.
			po:     po,
		}

		garbage = append(garbage, gci)
		gcnt++
	}

	sort.Slice(garbage[:gcnt], func(i, j int) bool { return garbage[i].value < garbage[j].value })

	cutoff := int(float32(gcnt) * ratio)
	metrics.GetOrRegisterCounter("ldbstore.collectgarbage.delete", nil).Inc(int64(cutoff))

	for i := 0; i < cutoff; i++ {
		s.delete(garbage[i].idx, garbage[i].idxKey, garbage[i].po)
	}
}

// Export writes all chunks from the store to a tar archive, returning the
// number of chunks written.
func (s *LDBStore) Export(out io.Writer) (int64, error) {
	tw := tar.NewWriter(out)
	defer tw.Close()

	it := s.db.NewIterator()
	defer it.Release()
	var count int64
	for ok := it.Seek([]byte{keyIndex}); ok; ok = it.Next() {
		key := it.Key()
		if (key == nil) || (key[0] != keyIndex) {
			break
		}

		var index dpaDBIndex

		hash := key[1:]
		decodeIndex(it.Value(), &index)
		po := s.po(hash)
		datakey := getDataKey(index.Idx, po)
		log.Trace("store.export", "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po)
		data, err := s.db.Get(datakey)
		if err != nil {
			log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed: %v", key[:], err))
			continue
		}

		hdr := &tar.Header{
			Name: hex.EncodeToString(hash),
			Mode: 0644,
			Size: int64(len(data)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return count, err
		}
		if _, err := tw.Write(data); err != nil {
			return count, err
		}
		count++
	}

	return count, nil
}

// Import reads chunks into the store from a tar archive, returning the number
// of chunks read.
func (s *LDBStore) Import(in io.Reader) (int64, error) {
	tr := tar.NewReader(in)

	var count int64
	var wg sync.WaitGroup
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return count, err
		}

		if len(hdr.Name) != 64 {
			log.Warn("ignoring non-chunk file", "name", hdr.Name)
			continue
		}

		keybytes, err := hex.DecodeString(hdr.Name)
		if err != nil {
			log.Warn("ignoring invalid chunk file", "name", hdr.Name, "err", err)
			continue
		}

		data, err := ioutil.ReadAll(tr)
		if err != nil {
			return count, err
		}
		key := Address(keybytes)
		chunk := NewChunk(key, nil)
		// the exported value is the raw stored record: a 32-byte address
		// prefix followed by the chunk payload (see encodeData)
		chunk.SData = data[32:]
		s.Put(context.TODO(), chunk)
		wg.Add(1)
		go func() {
			defer wg.Done()
			<-chunk.dbStoredC
		}()
		count++
	}
	wg.Wait()
	return count, nil
}

// Cleanup iterates over the database and deletes chunks whose data is
// missing or whose content hash does not match the index key.
func (s *LDBStore) Cleanup() {
	it := s.db.NewIterator()
	startPosition := []byte{keyIndex}
	it.Seek(startPosition)
	var key []byte
	var errorsFound, total int
	for it.Valid() {
		key = it.Key()
		if (key == nil) || (key[0] != keyIndex) {
			break
		}
		total++
		var index dpaDBIndex
		err := decodeIndex(it.Value(), &index)
		if err != nil {
			it.Next()
			continue
		}
		data, err := s.db.Get(getDataKey(index.Idx, s.po(Address(key[1:]))))
		if err != nil {
			log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed: %v", key[:], err))
			s.delete(index.Idx, getIndexKey(key[1:]), s.po(Address(key[1:])))
			errorsFound++
		} else {
			hasher := s.hashfunc()
			hasher.Write(data[32:])
			hash := hasher.Sum(nil)
			if !bytes.Equal(hash, key[1:]) {
				log.Warn(fmt.Sprintf("Found invalid chunk. Hash mismatch. hash=%x, key=%x", hash, key[:]))
				s.delete(index.Idx, getIndexKey(key[1:]), s.po(Address(key[1:])))
				errorsFound++
			}
		}
		it.Next()
	}
	it.Release()
	log.Warn(fmt.Sprintf("Found %v errors out of %v entries", errorsFound, total))
}

// ReIndex migrates chunks stored under the legacy keyOldData prefix to the
// current proximity-ordered data key layout, rehashing the data and updating
// the bucket counters as it goes.
func (s *LDBStore) ReIndex() {
	it := s.db.NewIterator()
	startPosition := []byte{keyOldData}
	it.Seek(startPosition)
	var key []byte
	var errorsFound, total int
	for it.Valid() {
		key = it.Key()
		if (key == nil) || (key[0] != keyOldData) {
			break
		}
		total++
		data := it.Value()
		hasher := s.hashfunc()
		hasher.Write(data)
		hash := hasher.Sum(nil)

		newKey := make([]byte, 10)
		oldCntKey := make([]byte, 2)
		newCntKey := make([]byte, 2)
		oldCntKey[0] = keyDistanceCnt
		newCntKey[0] = keyDistanceCnt
		key[0] = keyData
		key[1] = s.po(Address(key[1:]))
		oldCntKey[1] = key[1]
		newCntKey[1] = s.po(Address(newKey[1:]))
		copy(newKey[2:], key[1:])
		newValue := append(hash, data...)

		batch := new(leveldb.Batch)
		batch.Delete(key)
		s.bucketCnt[oldCntKey[1]]--
		batch.Put(oldCntKey, U64ToBytes(s.bucketCnt[oldCntKey[1]]))
		batch.Put(newKey, newValue)
		s.bucketCnt[newCntKey[1]]++
		batch.Put(newCntKey, U64ToBytes(s.bucketCnt[newCntKey[1]]))
		s.db.Write(batch)
		it.Next()
	}
	it.Release()
	log.Warn(fmt.Sprintf("Found %v errors out of %v entries", errorsFound, total))
}

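// delete removes the index and data entries of a single chunk and writes the
// decremented entry and bucket counters in the same batch. It does not take
// s.lock; synchronization is left to the caller.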
func (s *LDBStore) delete(idx uint64, idxKey []byte, po uint8) {
	metrics.GetOrRegisterCounter("ldbstore.delete", nil).Inc(1)

	batch := new(leveldb.Batch)
	batch.Delete(idxKey)
	batch.Delete(getDataKey(idx, po))
	s.entryCnt--
	s.bucketCnt[po]--
	cntKey := make([]byte, 2)
	cntKey[0] = keyDistanceCnt
	cntKey[1] = po
	batch.Put(keyEntryCnt, U64ToBytes(s.entryCnt))
	batch.Put(cntKey, U64ToBytes(s.bucketCnt[po]))
	s.db.Write(batch)
}

func (s *LDBStore) CurrentBucketStorageIndex(po uint8) uint64 {
	s.lock.RLock()
	defer s.lock.RUnlock()

	return s.bucketCnt[po]
}

func (s *LDBStore) Size() uint64 {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.entryCnt
}

func (s *LDBStore) CurrentStorageIndex() uint64 {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.dataIdx
}

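// Put stores a chunk, or, if an index entry already exists, only refreshes
// its access count. The database write happens asynchronously in
// writeBatches; chunk.markAsStored is called once the enclosing batch has
// been flushed.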
func (s *LDBStore) Put(ctx context.Context, chunk *Chunk) {
	metrics.GetOrRegisterCounter("ldbstore.put", nil).Inc(1)
	log.Trace("ldbstore.put", "key", chunk.Addr)

	ikey := getIndexKey(chunk.Addr)
	var index dpaDBIndex

	po := s.po(chunk.Addr)
	s.lock.Lock()
	defer s.lock.Unlock()

	log.Trace("ldbstore.put: s.db.Get", "key", chunk.Addr, "ikey", fmt.Sprintf("%x", ikey))
	idata, err := s.db.Get(ikey)
	if err != nil {
		s.doPut(chunk, &index, po)
		batchC := s.batchC
		go func() {
			<-batchC
			chunk.markAsStored()
		}()
	} else {
		log.Trace("ldbstore.put: chunk already exists, only update access", "key", chunk.Addr)
		decodeIndex(idata, &index)
		chunk.markAsStored()
	}
	index.Access = s.accessCnt
	s.accessCnt++
	idata = encodeIndex(&index)
	s.batch.Put(ikey, idata)
	select {
	case s.batchesC <- struct{}{}:
	default:
	}
}

// force putting into db, does not check access index
func (s *LDBStore) doPut(chunk *Chunk, index *dpaDBIndex, po uint8) {
	data := s.encodeDataFunc(chunk)
	dkey := getDataKey(s.dataIdx, po)
	s.batch.Put(dkey, data)
	index.Idx = s.dataIdx
	s.bucketCnt[po] = s.dataIdx
	s.entryCnt++
	s.dataIdx++

	cntKey := make([]byte, 2)
	cntKey[0] = keyDistanceCnt
	cntKey[1] = po
	s.batch.Put(cntKey, U64ToBytes(s.bucketCnt[po]))
}

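// writeBatches runs in its own goroutine until the store is closed. Each
// signal on batchesC swaps out the shared batch, persists it together with
// the counters, and then garbage-collects while entryCnt exceeds capacity.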
func (s *LDBStore) writeBatches() {
mainLoop:
	for {
		select {
		case <-s.quit:
			break mainLoop
		case <-s.batchesC:
			s.lock.Lock()
			b := s.batch
			e := s.entryCnt
			d := s.dataIdx
			a := s.accessCnt
			c := s.batchC
			s.batchC = make(chan bool)
			s.batch = new(leveldb.Batch)
			err := s.writeBatch(b, e, d, a)
			// TODO: set this error on the batch, then tell the chunk
			if err != nil {
				log.Error(fmt.Sprintf("spawn batch write (%d entries): %v", b.Len(), err))
			}
			close(c)
			for e > s.capacity {
				// Collect garbage in a separate goroutine
				// to be able to interrupt this loop by s.quit.
				done := make(chan struct{})
				go func() {
					s.collectGarbage(gcArrayFreeRatio)
					close(done)
				}()

				e = s.entryCnt
				select {
				case <-s.quit:
					s.lock.Unlock()
					break mainLoop
				case <-done:
				}
			}
			s.lock.Unlock()
		}
	}
	log.Trace("DbStore: quit batch write loop")
}

// writeBatch persists the batch together with the counters; it must not be
// called concurrently.
func (s *LDBStore) writeBatch(b *leveldb.Batch, entryCnt, dataIdx, accessCnt uint64) error {
	b.Put(keyEntryCnt, U64ToBytes(entryCnt))
	b.Put(keyDataIdx, U64ToBytes(dataIdx))
	b.Put(keyAccessCnt, U64ToBytes(accessCnt))
	l := b.Len()
	if err := s.db.Write(b); err != nil {
		return fmt.Errorf("unable to write batch: %v", err)
	}
	log.Trace(fmt.Sprintf("batch write (%d entries)", l))
	return nil
}

// newMockEncodeDataFunc returns a function that stores the chunk data
// in a mock store, bypassing the default encodeData functionality.
// The constructed function returns only the chunk address, as DbStore does
// not need to store the data itself, but still needs to create the index.
func newMockEncodeDataFunc(mockStore *mock.NodeStore) func(chunk *Chunk) []byte {
	return func(chunk *Chunk) []byte {
		if err := mockStore.Put(chunk.Addr, encodeData(chunk)); err != nil {
			log.Error(fmt.Sprintf("%T: Chunk %v put: %v", mockStore, chunk.Addr.Log(), err))
		}
		return chunk.Addr[:]
	}
}

// try to find index; if found, update access cnt and return true
func (s *LDBStore) tryAccessIdx(ikey []byte, index *dpaDBIndex) bool {
	idata, err := s.db.Get(ikey)
	if err != nil {
		return false
	}
	decodeIndex(idata, index)
	s.batch.Put(keyAccessCnt, U64ToBytes(s.accessCnt))
	s.accessCnt++
	index.Access = s.accessCnt
	idata = encodeIndex(index)
	s.batch.Put(ikey, idata)
	select {
	case s.batchesC <- struct{}{}:
	default:
	}
	return true
}

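// Get retrieves a chunk by address, bumping its access count, and returns
// ErrChunkNotFound if no index entry exists.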
func (s *LDBStore) Get(ctx context.Context, addr Address) (chunk *Chunk, err error) {
	metrics.GetOrRegisterCounter("ldbstore.get", nil).Inc(1)
	log.Trace("ldbstore.get", "key", addr)

	s.lock.Lock()
	defer s.lock.Unlock()
	return s.get(addr)
}

func (s *LDBStore) get(addr Address) (chunk *Chunk, err error) {
	var indx dpaDBIndex

	if s.tryAccessIdx(getIndexKey(addr), &indx) {
		var data []byte
		if s.getDataFunc != nil {
			// if getDataFunc is defined, use it to retrieve the chunk data
			log.Trace("ldbstore.get retrieve with getDataFunc", "key", addr)
			data, err = s.getDataFunc(addr)
			if err != nil {
				return
			}
		} else {
			// default DbStore functionality to retrieve chunk data
			proximity := s.po(addr)
			datakey := getDataKey(indx.Idx, proximity)
			data, err = s.db.Get(datakey)
			log.Trace("ldbstore.get retrieve", "key", addr, "indexkey", indx.Idx, "datakey", fmt.Sprintf("%x", datakey), "proximity", proximity)
			if err != nil {
				log.Trace("ldbstore.get chunk found but could not be accessed", "key", addr, "err", err)
				s.delete(indx.Idx, getIndexKey(addr), s.po(addr))
				return
			}
		}

		chunk = NewChunk(addr, nil)
		chunk.markAsStored()
		decodeData(data, chunk)
	} else {
		err = ErrChunkNotFound
	}

	return
}

// newMockGetDataFunc returns a function that reads chunk data from
// the mock database, which is used as the value for DbStore.getDataFunc
// to bypass the default functionality of DbStore with a mock store.
func newMockGetDataFunc(mockStore *mock.NodeStore) func(addr Address) (data []byte, err error) {
	return func(addr Address) (data []byte, err error) {
		data, err = mockStore.Get(addr)
		if err == mock.ErrNotFound {
			// preserve ErrChunkNotFound error
			err = ErrChunkNotFound
		}
		return data, err
	}
}

func (s *LDBStore) updateAccessCnt(addr Address) {
	s.lock.Lock()
	defer s.lock.Unlock()

	var index dpaDBIndex
	s.tryAccessIdx(getIndexKey(addr), &index) // result is discarded, only update access cnt
}

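// setCapacity records the new maximum entry count and, if the store already
// exceeds it, runs garbage collection until entryCnt fits.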
func (s *LDBStore) setCapacity(c uint64) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.capacity = c

	if s.entryCnt > c {
		ratio := float32(1.01) - float32(c)/float32(s.entryCnt)
		if ratio < gcArrayFreeRatio {
			ratio = gcArrayFreeRatio
		}
		if ratio > 1 {
			ratio = 1
		}
		for s.entryCnt > c {
			s.collectGarbage(ratio)
		}
	}
}

func (s *LDBStore) Close() {
	close(s.quit)
	s.db.Close()
}

// SyncIterator(since, until, po, f) calls f on each hash of bin po, in
// ascending order of data index from since up to and including until,
// stopping early if f returns false.
func (s *LDBStore) SyncIterator(since uint64, until uint64, po uint8, f func(Address, uint64) bool) error {
	metrics.GetOrRegisterCounter("ldbstore.synciterator", nil).Inc(1)

	sincekey := getDataKey(since, po)
	untilkey := getDataKey(until, po)
	it := s.db.NewIterator()
	defer it.Release()

	for ok := it.Seek(sincekey); ok; ok = it.Next() {
		metrics.GetOrRegisterCounter("ldbstore.synciterator.seek", nil).Inc(1)

		dbkey := it.Key()
		if dbkey[0] != keyData || dbkey[1] != po || bytes.Compare(untilkey, dbkey) < 0 {
			break
		}
		key := make([]byte, 32)
		val := it.Value()
		copy(key, val[:32])
		if !f(Address(key), binary.BigEndian.Uint64(dbkey[2:])) {
			break
		}
	}
	return it.Error()
}

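// databaseExists reports whether a LevelDB database already exists at path,
// by attempting to open it with ErrorIfMissing set.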
func databaseExists(path string) bool {
	o := &opt.Options{
		ErrorIfMissing: true,
	}
	tdb, err := leveldb.OpenFile(path, o)
	if err != nil {
		return false
	}
	defer tdb.Close()
	return true
}