github.com/FUSIONFoundation/efsn@v3.6.2-0.20200916075423-dbb5dd5d2cc7+incompatible/swarm/storage/ldbstore.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Disk storage layer for the bzz package.
// DbStore implements the ChunkStore interface and is used by the FileStore as
// persistent storage of chunks.
// It implements purging based on access count, allowing for external control of
// the maximum capacity.

package storage

import (
	"archive/tar"
	"bytes"
	"context"
	"encoding/binary"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"sort"
	"sync"

	"github.com/FusionFoundation/efsn/metrics"
	"github.com/FusionFoundation/efsn/rlp"
	ch "github.com/FusionFoundation/efsn/swarm/chunk"
	"github.com/FusionFoundation/efsn/swarm/log"
	"github.com/FusionFoundation/efsn/swarm/storage/mock"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

const (
	gcArrayFreeRatio = 0.1
	maxGCitems       = 5000 // max number of items to be gc'd per call to collectGarbage()
)

var (
	dbEntryCount = metrics.NewRegisteredCounter("ldbstore.entryCnt", nil)
)

var (
	keyIndex       = byte(0)
	keyOldData     = byte(1)
	keyAccessCnt   = []byte{2}
	keyEntryCnt    = []byte{3}
	keyDataIdx     = []byte{4}
	keyData        = byte(6)
	keyDistanceCnt = byte(7)
)

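// The key prefixes above define the on-disk layout (an editor's summary,
// derived from getIndexKey, getDataKey and the per-bin counters below):
//
//	index entry:  keyIndex || chunk address        -> rlp(dpaDBIndex)
//	chunk data:   keyData || po || big-endian idx  -> chunk address || data
//	bin counter:  keyDistanceCnt || po             -> uint64 (bucketCnt[po])
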
var (
	ErrDBClosed = errors.New("LDBStore closed")
)

type gcItem struct {
	idx    uint64
	value  uint64
	idxKey []byte
	po     uint8
}

type LDBStoreParams struct {
	*StoreParams
	Path string
	Po   func(Address) uint8
}

// NewLDBStoreParams constructs LDBStoreParams with the specified values.
func NewLDBStoreParams(storeparams *StoreParams, path string) *LDBStoreParams {
	return &LDBStoreParams{
		StoreParams: storeparams,
		Path:        path,
		Po:          func(k Address) (ret uint8) { return uint8(Proximity(storeparams.BaseKey, k[:])) },
	}
}

type LDBStore struct {
	db *LDBDatabase

	// this should be stored in db, accessed transactionally
	entryCnt  uint64 // number of items in the LevelDB
	accessCnt uint64 // ever-accumulating number increased every time we read/access an entry
	dataIdx   uint64 // similar to entryCnt, but we only increment it
	capacity  uint64
	bucketCnt []uint64

	hashfunc SwarmHasher
	po       func(Address) uint8

	batchC   chan bool
	batchesC chan struct{}
	closed   bool
	batch    *dbBatch
	lock     sync.RWMutex
	quit     chan struct{}

	// encodeDataFunc is used to bypass the default functionality of
	// DbStore with mock.NodeStore for testing purposes.
	encodeDataFunc func(chunk Chunk) []byte
	// If getDataFunc is defined, it will be used for retrieving the chunk
	// data instead of reading it from the local LevelDB database.
	getDataFunc func(key Address) (data []byte, err error)
}

type dbBatch struct {
	*leveldb.Batch
	err error
	c   chan struct{}
}

func newBatch() *dbBatch {
	return &dbBatch{Batch: new(leveldb.Batch), c: make(chan struct{})}
}

// TODO: Instead of passing the distance function, just pass the address from which distances are calculated
// to avoid the appearance of a pluggable distance metric and opportunities of bugs associated with providing
// a function different from the one that is actually used.
func NewLDBStore(params *LDBStoreParams) (s *LDBStore, err error) {
	s = new(LDBStore)
	s.hashfunc = params.Hash
	s.quit = make(chan struct{})

	s.batchesC = make(chan struct{}, 1)
	go s.writeBatches()
	s.batch = newBatch()
	// associate encodeData with default functionality
	s.encodeDataFunc = encodeData

	s.db, err = NewLDBDatabase(params.Path)
	if err != nil {
		return nil, err
	}

	s.po = params.Po
	s.setCapacity(params.DbCapacity)

	s.bucketCnt = make([]uint64, 0x100)
	for i := 0; i < 0x100; i++ {
		k := make([]byte, 2)
		k[0] = keyDistanceCnt
		k[1] = uint8(i)
		cnt, _ := s.db.Get(k)
		s.bucketCnt[i] = BytesToU64(cnt)
	}
	data, _ := s.db.Get(keyEntryCnt)
	s.entryCnt = BytesToU64(data)
	data, _ = s.db.Get(keyAccessCnt)
	s.accessCnt = BytesToU64(data)
	data, _ = s.db.Get(keyDataIdx)
	s.dataIdx = BytesToU64(data)

	return s, nil
}

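// A minimal construction sketch (editor's addition; assumes the caller already
// has a *StoreParams, and the database path is hypothetical):
//
//	params := NewLDBStoreParams(storeParams, "/path/to/chunkdb")
//	store, err := NewLDBStore(params)
//	if err != nil {
//		// handle error
//	}
//	defer store.Close()
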
// NewMockDbStore creates a new instance of DbStore with
// mockStore set to a provided value. If the mockStore argument is nil,
// this function behaves exactly as NewLDBStore.
func NewMockDbStore(params *LDBStoreParams, mockStore *mock.NodeStore) (s *LDBStore, err error) {
	s, err = NewLDBStore(params)
	if err != nil {
		return nil, err
	}

	// replace put and get with mock store functionality
	if mockStore != nil {
		s.encodeDataFunc = newMockEncodeDataFunc(mockStore)
		s.getDataFunc = newMockGetDataFunc(mockStore)
	}
	return
}

type dpaDBIndex struct {
	Idx    uint64
	Access uint64
}

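// BytesToU64 decodes data as a big-endian uint64; inputs shorter than 8 bytes
// (including a missing database key read back as nil) decode to 0.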
func BytesToU64(data []byte) uint64 {
	if len(data) < 8 {
		return 0
	}
	return binary.BigEndian.Uint64(data)
}

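// U64ToBytes encodes val as an 8-byte big-endian slice, the inverse of
// BytesToU64.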
func U64ToBytes(val uint64) []byte {
	data := make([]byte, 8)
	binary.BigEndian.PutUint64(data, val)
	return data
}

func (s *LDBStore) updateIndexAccess(index *dpaDBIndex) {
	index.Access = s.accessCnt
}

func getIndexKey(hash Address) []byte {
	hashSize := len(hash)
	key := make([]byte, hashSize+1)
	key[0] = keyIndex
	copy(key[1:], hash[:])
	return key
}

func getDataKey(idx uint64, po uint8) []byte {
	key := make([]byte, 10)
	key[0] = keyData
	key[1] = po
	binary.BigEndian.PutUint64(key[2:], idx)

	return key
}

func encodeIndex(index *dpaDBIndex) []byte {
	data, _ := rlp.EncodeToBytes(index)
	return data
}

func encodeData(chunk Chunk) []byte {
	// Always create a new underlying array for the returned byte slice.
	// The chunk.Address array may be used in the returned slice, which
	// may be changed later in the code or by LevelDB, with the result
	// that the Address is changed as well.
	return append(append([]byte{}, chunk.Address()[:]...), chunk.Data()...)
}

func decodeIndex(data []byte, index *dpaDBIndex) error {
	dec := rlp.NewStream(bytes.NewReader(data), 0)
	return dec.Decode(index)
}

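// decodeData reconstructs a chunk from a stored record, stripping the 32-byte
// address prefix that encodeData writes in front of the payload.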
func decodeData(addr Address, data []byte) (*chunk, error) {
	return NewChunk(addr, data[32:]), nil
}

func (s *LDBStore) collectGarbage(ratio float32) {
	log.Trace("collectGarbage", "ratio", ratio)

	metrics.GetOrRegisterCounter("ldbstore.collectgarbage", nil).Inc(1)

	it := s.db.NewIterator()
	defer it.Release()

	garbage := []*gcItem{}
	gcnt := 0

	for ok := it.Seek([]byte{keyIndex}); ok && (gcnt < maxGCitems) && (uint64(gcnt) < s.entryCnt); ok = it.Next() {
		itkey := it.Key()

		if (itkey == nil) || (itkey[0] != keyIndex) {
			break
		}

		// it.Key() contents change on next call to it.Next(), so we must copy it
		key := make([]byte, len(it.Key()))
		copy(key, it.Key())

		val := it.Value()

		var index dpaDBIndex

		hash := key[1:]
		decodeIndex(val, &index)
		po := s.po(hash)

		gci := &gcItem{
			idxKey: key,
			idx:    index.Idx,
			value:  index.Access, // the smaller, the more likely to be gc'd. see sort comparator below.
			po:     po,
		}

		garbage = append(garbage, gci)
		gcnt++
	}

	sort.Slice(garbage[:gcnt], func(i, j int) bool { return garbage[i].value < garbage[j].value })

	cutoff := int(float32(gcnt) * ratio)
	metrics.GetOrRegisterCounter("ldbstore.collectgarbage.delete", nil).Inc(int64(cutoff))

	for i := 0; i < cutoff; i++ {
		s.delete(garbage[i].idx, garbage[i].idxKey, garbage[i].po)
	}
}

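// A worked example of the policy above (editor's note): a call with
// ratio = gcArrayFreeRatio (0.1) scans up to maxGCitems (5000) index entries,
// sorts them by access count, and deletes the least recently accessed 10%,
// i.e. at most 500 chunks per collectGarbage pass.
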
// Export writes all chunks from the store to a tar archive, returning the
// number of chunks written.
func (s *LDBStore) Export(out io.Writer) (int64, error) {
	tw := tar.NewWriter(out)
	defer tw.Close()

	it := s.db.NewIterator()
	defer it.Release()
	var count int64
	for ok := it.Seek([]byte{keyIndex}); ok; ok = it.Next() {
		key := it.Key()
		if (key == nil) || (key[0] != keyIndex) {
			break
		}

		var index dpaDBIndex

		hash := key[1:]
		decodeIndex(it.Value(), &index)
		po := s.po(hash)
		datakey := getDataKey(index.Idx, po)
		log.Trace("store.export", "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po)
		data, err := s.db.Get(datakey)
		if err != nil {
			log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed: %v", key, err))
			continue
		}

		hdr := &tar.Header{
			Name: hex.EncodeToString(hash),
			Mode: 0644,
			Size: int64(len(data)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return count, err
		}
		if _, err := tw.Write(data); err != nil {
			return count, err
		}
		count++
	}

	return count, nil
}

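// A usage sketch for Export (editor's addition; assumes an *LDBStore named
// store, and the file name is hypothetical):
//
//	f, err := os.Create("chunks.tar")
//	if err != nil {
//		// handle error
//	}
//	defer f.Close()
//	n, err := store.Export(f) // n is the number of chunks written
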
// Import reads chunks into the store from a tar archive, returning the number
// of chunks read.
func (s *LDBStore) Import(in io.Reader) (int64, error) {
	tr := tar.NewReader(in)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	countC := make(chan int64)
	errC := make(chan error)
	var count int64
	go func() {
		for {
			hdr, err := tr.Next()
			if err == io.EOF {
				break
			} else if err != nil {
				select {
				case errC <- err:
				case <-ctx.Done():
				}
			}

			if len(hdr.Name) != 64 {
				log.Warn("ignoring non-chunk file", "name", hdr.Name)
				continue
			}

			keybytes, err := hex.DecodeString(hdr.Name)
			if err != nil {
				log.Warn("ignoring invalid chunk file", "name", hdr.Name, "err", err)
				continue
			}

			data, err := ioutil.ReadAll(tr)
			if err != nil {
				select {
				case errC <- err:
				case <-ctx.Done():
				}
			}
			key := Address(keybytes)
			chunk := NewChunk(key, data[32:])

			go func() {
				select {
				case errC <- s.Put(ctx, chunk):
				case <-ctx.Done():
				}
			}()

			count++
		}
		countC <- count
	}()

	// wait for all chunks to be stored
	i := int64(0)
	var total int64
	for {
		select {
		case err := <-errC:
			if err != nil {
				return count, err
			}
			i++
		case total = <-countC:
		case <-ctx.Done():
			return i, ctx.Err()
		}
		if total > 0 && i == total {
			return total, nil
		}
	}
}

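// The matching restore-side sketch (editor's addition; same assumptions as the
// Export sketch above):
//
//	f, err := os.Open("chunks.tar")
//	if err != nil {
//		// handle error
//	}
//	defer f.Close()
//	n, err := store.Import(f) // n is the number of chunks read and stored
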
func (s *LDBStore) Cleanup() {
	// Iterates over the database and checks that there are no chunks bigger than 4kb
	var errorsFound, removed, total int

	it := s.db.NewIterator()
	defer it.Release()
	for ok := it.Seek([]byte{keyIndex}); ok; ok = it.Next() {
		key := it.Key()
		if (key == nil) || (key[0] != keyIndex) {
			break
		}
		total++
		var index dpaDBIndex
		err := decodeIndex(it.Value(), &index)
		if err != nil {
			log.Warn("Cannot decode")
			errorsFound++
			continue
		}
		hash := key[1:]
		po := s.po(hash)
		datakey := getDataKey(index.Idx, po)
		data, err := s.db.Get(datakey)
		if err != nil {
			found := false

			// highest possible proximity is 255; iterate with an int so the
			// loop condition cannot be made always-true by uint8 wrap-around
			for i := 1; i <= 255; i++ {
				po = uint8(i)
				datakey = getDataKey(index.Idx, po)
				data, err = s.db.Get(datakey)
				if err == nil {
					found = true
					break
				}
			}

			if !found {
				log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed with any po", key))
				errorsFound++
				continue
			}
		}

		ck := data[:32]
		c, err := decodeData(ck, data)
		if err != nil {
			log.Error("decodeData error", "err", err)
			continue
		}

		cs := int64(binary.LittleEndian.Uint64(c.sdata[:8]))
		log.Trace("chunk", "key", fmt.Sprintf("%x", key), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.sdata), "size", cs)

		if len(c.sdata) > ch.DefaultSize+8 {
			log.Warn("chunk for cleanup", "key", fmt.Sprintf("%x", key), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.sdata), "size", cs)
			s.delete(index.Idx, getIndexKey(key[1:]), po)
			removed++
			errorsFound++
		}
	}

	log.Warn(fmt.Sprintf("Found %v errors out of %v entries. Removed %v chunks.", errorsFound, total, removed))
}

func (s *LDBStore) ReIndex() {
	// Iterates over the database and checks that there are no faulty chunks
	it := s.db.NewIterator()
	startPosition := []byte{keyOldData}
	it.Seek(startPosition)
	var key []byte
	var errorsFound, total int
	for it.Valid() {
		key = it.Key()
		if (key == nil) || (key[0] != keyOldData) {
			break
		}
		data := it.Value()
		hasher := s.hashfunc()
		hasher.Write(data)
		hash := hasher.Sum(nil)

		newKey := make([]byte, 10)
		oldCntKey := make([]byte, 2)
		newCntKey := make([]byte, 2)
		oldCntKey[0] = keyDistanceCnt
		newCntKey[0] = keyDistanceCnt
		key[0] = keyData
		key[1] = s.po(Address(key[1:]))
		oldCntKey[1] = key[1]
		newCntKey[1] = s.po(Address(newKey[1:]))
		copy(newKey[2:], key[1:])
		newValue := append(hash, data...)

		batch := new(leveldb.Batch)
		batch.Delete(key)
		s.bucketCnt[oldCntKey[1]]--
		batch.Put(oldCntKey, U64ToBytes(s.bucketCnt[oldCntKey[1]]))
		batch.Put(newKey, newValue)
		s.bucketCnt[newCntKey[1]]++
		batch.Put(newCntKey, U64ToBytes(s.bucketCnt[newCntKey[1]]))
		s.db.Write(batch)
		total++ // count the reindexed entry so the summary below is accurate
		it.Next()
	}
	it.Release()
	log.Warn(fmt.Sprintf("Found %v errors out of %v entries", errorsFound, total))
}

func (s *LDBStore) Delete(addr Address) {
	s.lock.Lock()
	defer s.lock.Unlock()

	ikey := getIndexKey(addr)

	var indx dpaDBIndex
	s.tryAccessIdx(ikey, &indx)

	s.delete(indx.Idx, ikey, s.po(addr))
}

func (s *LDBStore) delete(idx uint64, idxKey []byte, po uint8) {
	metrics.GetOrRegisterCounter("ldbstore.delete", nil).Inc(1)

	batch := new(leveldb.Batch)
	batch.Delete(idxKey)
	batch.Delete(getDataKey(idx, po))
	s.entryCnt--
	dbEntryCount.Dec(1)
	cntKey := make([]byte, 2)
	cntKey[0] = keyDistanceCnt
	cntKey[1] = po
	batch.Put(keyEntryCnt, U64ToBytes(s.entryCnt))
	batch.Put(cntKey, U64ToBytes(s.bucketCnt[po]))
	s.db.Write(batch)
}

func (s *LDBStore) BinIndex(po uint8) uint64 {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.bucketCnt[po]
}

func (s *LDBStore) Size() uint64 {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.entryCnt
}

func (s *LDBStore) CurrentStorageIndex() uint64 {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.dataIdx
}

func (s *LDBStore) Put(ctx context.Context, chunk Chunk) error {
	metrics.GetOrRegisterCounter("ldbstore.put", nil).Inc(1)
	log.Trace("ldbstore.put", "key", chunk.Address())

	ikey := getIndexKey(chunk.Address())
	var index dpaDBIndex

	po := s.po(chunk.Address())

	s.lock.Lock()

	if s.closed {
		s.lock.Unlock()
		return ErrDBClosed
	}
	batch := s.batch

	log.Trace("ldbstore.put: s.db.Get", "key", chunk.Address(), "ikey", fmt.Sprintf("%x", ikey))
	idata, err := s.db.Get(ikey)
	if err != nil {
		s.doPut(chunk, &index, po)
	} else {
		log.Trace("ldbstore.put: chunk already exists, only update access", "key", chunk.Address())
		decodeIndex(idata, &index)
	}
	index.Access = s.accessCnt
	s.accessCnt++
	idata = encodeIndex(&index)
	s.batch.Put(ikey, idata)

	s.lock.Unlock()

	select {
	case s.batchesC <- struct{}{}:
	default:
	}

	select {
	case <-batch.c:
		return batch.err
	case <-ctx.Done():
		return ctx.Err()
	}
}

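// A caller-side sketch (editor's addition; assumes an *LDBStore named store
// and a Chunk named chunk): Put blocks until the batch holding the chunk is
// flushed, so bound the wait with a context:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//	defer cancel()
//	if err := store.Put(ctx, chunk); err != nil {
//		// handle error (e.g. ErrDBClosed or a context timeout)
//	}
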
// force putting into db, does not check access index
func (s *LDBStore) doPut(chunk Chunk, index *dpaDBIndex, po uint8) {
	data := s.encodeDataFunc(chunk)
	dkey := getDataKey(s.dataIdx, po)
	s.batch.Put(dkey, data)
	index.Idx = s.dataIdx
	s.bucketCnt[po] = s.dataIdx
	s.entryCnt++
	dbEntryCount.Inc(1)
	s.dataIdx++

	cntKey := make([]byte, 2)
	cntKey[0] = keyDistanceCnt
	cntKey[1] = po
	s.batch.Put(cntKey, U64ToBytes(s.bucketCnt[po]))
}

func (s *LDBStore) writeBatches() {
	for {
		select {
		case <-s.quit:
			log.Debug("DbStore: quit batch write loop")
			return
		case <-s.batchesC:
			err := s.writeCurrentBatch()
			if err != nil {
				log.Debug("DbStore: quit batch write loop", "err", err.Error())
				return
			}
		}
	}
}

func (s *LDBStore) writeCurrentBatch() error {
	s.lock.Lock()
	defer s.lock.Unlock()
	b := s.batch
	l := b.Len()
	if l == 0 {
		return nil
	}
	e := s.entryCnt
	d := s.dataIdx
	a := s.accessCnt
	s.batch = newBatch()
	b.err = s.writeBatch(b, e, d, a)
	close(b.c)
	for e > s.capacity {
		log.Trace("for >", "e", e, "s.capacity", s.capacity)
		// Collect garbage in a separate goroutine
		// to be able to interrupt this loop by s.quit.
		done := make(chan struct{})
		go func() {
			s.collectGarbage(gcArrayFreeRatio)
			log.Trace("collectGarbage closing done")
			close(done)
		}()

		select {
		case <-s.quit:
			return errors.New("CollectGarbage terminated due to quit")
		case <-done:
		}
		e = s.entryCnt
	}
	return nil
}

// writeBatch writes the batch and the updated counters; it must not be called
// concurrently.
func (s *LDBStore) writeBatch(b *dbBatch, entryCnt, dataIdx, accessCnt uint64) error {
	b.Put(keyEntryCnt, U64ToBytes(entryCnt))
	b.Put(keyDataIdx, U64ToBytes(dataIdx))
	b.Put(keyAccessCnt, U64ToBytes(accessCnt))
	l := b.Len()
	if err := s.db.Write(b.Batch); err != nil {
		return fmt.Errorf("unable to write batch: %v", err)
	}
	log.Trace(fmt.Sprintf("batch write (%d entries)", l))
	return nil
}

// newMockEncodeDataFunc returns a function that stores the chunk data
// to a mock store to bypass the default functionality of encodeData.
// The constructed function returns only the chunk address, as DbStore does
// not need to store the data itself, but still needs to create the index.
func newMockEncodeDataFunc(mockStore *mock.NodeStore) func(chunk Chunk) []byte {
	return func(chunk Chunk) []byte {
		if err := mockStore.Put(chunk.Address(), encodeData(chunk)); err != nil {
			log.Error(fmt.Sprintf("%T: Chunk %v put: %v", mockStore, chunk.Address().Log(), err))
		}
		return chunk.Address()[:]
	}
}

// try to find index; if found, update access cnt and return true
func (s *LDBStore) tryAccessIdx(ikey []byte, index *dpaDBIndex) bool {
	idata, err := s.db.Get(ikey)
	if err != nil {
		return false
	}
	decodeIndex(idata, index)
	s.batch.Put(keyAccessCnt, U64ToBytes(s.accessCnt))
	s.accessCnt++
	index.Access = s.accessCnt
	idata = encodeIndex(index)
	s.batch.Put(ikey, idata)
	select {
	case s.batchesC <- struct{}{}:
	default:
	}
	return true
}

func (s *LDBStore) Get(_ context.Context, addr Address) (chunk Chunk, err error) {
	metrics.GetOrRegisterCounter("ldbstore.get", nil).Inc(1)
	log.Trace("ldbstore.get", "key", addr)

	s.lock.Lock()
	defer s.lock.Unlock()
	return s.get(addr)
}

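// A minimal read sketch (editor's addition; assumes an *LDBStore named store
// and an Address named addr): Get returns ErrChunkNotFound when no index
// entry exists for the address:
//
//	c, err := store.Get(context.TODO(), addr)
//	if err == ErrChunkNotFound {
//		// chunk is not stored locally
//	}
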
func (s *LDBStore) get(addr Address) (chunk *chunk, err error) {
	var indx dpaDBIndex
	if s.closed {
		return nil, ErrDBClosed
	}
	if s.tryAccessIdx(getIndexKey(addr), &indx) {
		var data []byte
		if s.getDataFunc != nil {
			// if getDataFunc is defined, use it to retrieve the chunk data
			log.Trace("ldbstore.get retrieve with getDataFunc", "key", addr)
			data, err = s.getDataFunc(addr)
			if err != nil {
				return
			}
		} else {
			// default DbStore functionality to retrieve chunk data
			proximity := s.po(addr)
			datakey := getDataKey(indx.Idx, proximity)
			data, err = s.db.Get(datakey)
			log.Trace("ldbstore.get retrieve", "key", addr, "indexkey", indx.Idx, "datakey", fmt.Sprintf("%x", datakey), "proximity", proximity)
			if err != nil {
				log.Trace("ldbstore.get chunk found but could not be accessed", "key", addr, "err", err)
				s.delete(indx.Idx, getIndexKey(addr), s.po(addr))
				return
			}
		}

		return decodeData(addr, data)
	} else {
		err = ErrChunkNotFound
	}

	return
}

// newMockGetDataFunc returns a function that reads chunk data from
// the mock database, which is used as the value for DbStore.getDataFunc
// to bypass the default functionality of DbStore with a mock store.
func newMockGetDataFunc(mockStore *mock.NodeStore) func(addr Address) (data []byte, err error) {
	return func(addr Address) (data []byte, err error) {
		data, err = mockStore.Get(addr)
		if err == mock.ErrNotFound {
			// preserve ErrChunkNotFound error
			err = ErrChunkNotFound
		}
		return data, err
	}
}

func (s *LDBStore) updateAccessCnt(addr Address) {
	s.lock.Lock()
	defer s.lock.Unlock()

	var index dpaDBIndex
	s.tryAccessIdx(getIndexKey(addr), &index) // ignore the result, only update the access counter
}

func (s *LDBStore) setCapacity(c uint64) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.capacity = c

	if s.entryCnt > c {
		ratio := float32(1.01) - float32(c)/float32(s.entryCnt)
		if ratio < gcArrayFreeRatio {
			ratio = gcArrayFreeRatio
		}
		if ratio > 1 {
			ratio = 1
		}
		for s.entryCnt > c {
			s.collectGarbage(ratio)
		}
	}
}

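// Editor's note, a worked example of the ratio above: with entryCnt = 10000
// and a new capacity c = 8000, ratio = 1.01 - 8000/10000 = 0.21, so each
// collectGarbage pass deletes roughly the least-accessed 21% of the entries
// it scans, repeating until entryCnt drops to the capacity.
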
func (s *LDBStore) Close() {
	close(s.quit)
	s.lock.Lock()
	s.closed = true
	s.lock.Unlock()
	// force writing out current batch
	s.writeCurrentBatch()
	close(s.batchesC)
	s.db.Close()
}

// SyncIterator(since, until, po, f) calls f on each hash of bin po from since
// up to until
func (s *LDBStore) SyncIterator(since uint64, until uint64, po uint8, f func(Address, uint64) bool) error {
	metrics.GetOrRegisterCounter("ldbstore.synciterator", nil).Inc(1)

	sincekey := getDataKey(since, po)
	untilkey := getDataKey(until, po)
	it := s.db.NewIterator()
	defer it.Release()

	for ok := it.Seek(sincekey); ok; ok = it.Next() {
		metrics.GetOrRegisterCounter("ldbstore.synciterator.seek", nil).Inc(1)

		dbkey := it.Key()
		if dbkey[0] != keyData || dbkey[1] != po || bytes.Compare(untilkey, dbkey) < 0 {
			break
		}
		key := make([]byte, 32)
		val := it.Value()
		copy(key, val[:32])
		if !f(Address(key), binary.BigEndian.Uint64(dbkey[2:])) {
			break
		}
	}
	return it.Error()
}

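// databaseExists reports whether a LevelDB database already exists at path,
// opening it with ErrorIfMissing set so the check never creates a new one.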
func databaseExists(path string) bool {
	o := &opt.Options{
		ErrorIfMissing: true,
	}
	tdb, err := leveldb.OpenFile(path, o)
	if err != nil {
		return false
	}
	defer tdb.Close()
	return true
}