github.com/linapex/ethereum-go-chinese@v0.0.0-20190316121929-f8b7a73c3fa1/swarm/storage/ldbstore.go


//<developer>
//    <name>linapex 曹一峰</name>
//    <email>linapex@163.com</email>
//    <wx>superexc</wx>
//    <qqgroup>128148617</qqgroup>
//    <url>https://jsq.ink</url>
//    <role>pku engineer</role>
//    <date>2019-03-16 19:16:45</date>
//</624450120020135936>


// disk storage layer for the package bzz
// DbStore implements the ChunkStore interface and is used by the FileStore as
// persistent storage of chunks
// it implements purging based on access count, allowing for external control of
// max capacity

package storage

import (
	"archive/tar"
	"bytes"
	"context"
	"encoding/binary"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"sync"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage/mock"
	"github.com/syndtr/goleveldb/leveldb"
)

const (
	defaultGCRatio    = 10
	defaultMaxGCRound = 10000
	defaultMaxGCBatch = 5000

	wEntryCnt  = 1 << 0
	wIndexCnt  = 1 << 1
	wAccessCnt = 1 << 2
)

var (
	dbEntryCount = metrics.NewRegisteredCounter("ldbstore.entryCnt", nil)
)

var (
	keyIndex       = byte(0)
	keyAccessCnt   = []byte{2}
	keyEntryCnt    = []byte{3}
	keyDataIdx     = []byte{4}
	keyData        = byte(6)
	keyDistanceCnt = byte(7)
	keySchema      = []byte{8}
	keyGCIdx       = byte(9) // access to chunk data index, used by garbage collection in ascending order from first entry
)

var (
	ErrDBClosed = errors.New("LDBStore closed")
)

type LDBStoreParams struct {
	*StoreParams
	Path string
	Po   func(Address) uint8
}

// NewLDBStoreParams constructs LDBStoreParams with the specified values.
func NewLDBStoreParams(storeparams *StoreParams, path string) *LDBStoreParams {
	return &LDBStoreParams{
		StoreParams: storeparams,
		Path:        path,
		Po:          func(k Address) (ret uint8) { return uint8(Proximity(storeparams.BaseKey, k[:])) },
	}
}

type garbage struct {
	maxRound int           // maximum number of chunks to delete in one garbage collection round
	maxBatch int           // maximum number of chunks to delete in one database request batch
	ratio    int           // 1/x ratio to use to calculate the number of chunks to gc on a low-capacity db
	count    int           // number of chunks deleted in the running round
	target   int           // number of chunks to delete in the running round
	batch    *dbBatch      // the delete batch
	runC     chan struct{} // a struct in the chan means gc is NOT running
}

type LDBStore struct {
	db *LDBDatabase

	// this should be stored in db, accessed transactionally
	entryCnt  uint64 // number of items in the LevelDB
	accessCnt uint64 // ever-accumulating number increased every time we read/access an entry
	dataIdx   uint64 // similar to entryCnt, but we only increment it
	capacity  uint64
	bucketCnt []uint64

	hashfunc SwarmHasher
	po       func(Address) uint8

	batchesC chan struct{}
	closed   bool
	batch    *dbBatch
	lock     sync.RWMutex
	quit     chan struct{}
	gc       *garbage

	// The function encodeDataFunc is used to bypass the default functionality of
	// DbStore with mock.NodeStore for testing purposes.
	encodeDataFunc func(chunk Chunk) []byte
	// If getDataFunc is defined, it will be used for retrieving the chunk data
	// instead of the local LevelDB database.
	getDataFunc func(key Address) (data []byte, err error)
}

type dbBatch struct {
	*leveldb.Batch
	err error
	c   chan struct{}
}

func newBatch() *dbBatch {
	return &dbBatch{Batch: new(leveldb.Batch), c: make(chan struct{})}
}

// TODO: Instead of passing the distance function, just pass the address from which distances are calculated,
// to avoid the appearance of a pluggable distance metric and the bugs associated with providing
// a function different from the one that is actually used.
func NewLDBStore(params *LDBStoreParams) (s *LDBStore, err error) {
	s = new(LDBStore)
	s.hashfunc = params.Hash
	s.quit = make(chan struct{})

	s.batchesC = make(chan struct{}, 1)
	go s.writeBatches()
	s.batch = newBatch()
	// associate encodeData with default functionality
	s.encodeDataFunc = encodeData

	s.db, err = NewLDBDatabase(params.Path)
	if err != nil {
		return nil, err
	}

	s.po = params.Po
	s.setCapacity(params.DbCapacity)

	s.bucketCnt = make([]uint64, 0x100)
	for i := 0; i < 0x100; i++ {
		k := make([]byte, 2)
		k[0] = keyDistanceCnt
		k[1] = uint8(i)
		cnt, _ := s.db.Get(k)
		s.bucketCnt[i] = BytesToU64(cnt)
	}
	data, _ := s.db.Get(keyEntryCnt)
	s.entryCnt = BytesToU64(data)
	data, _ = s.db.Get(keyAccessCnt)
	s.accessCnt = BytesToU64(data)
	data, _ = s.db.Get(keyDataIdx)
	s.dataIdx = BytesToU64(data)

	// set up garbage collection
	s.gc = &garbage{
		maxBatch: defaultMaxGCBatch,
		maxRound: defaultMaxGCRound,
		ratio:    defaultGCRatio,
	}

	s.gc.runC = make(chan struct{}, 1)
	s.gc.runC <- struct{}{}

	return s, nil
}
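
// A minimal usage sketch, assuming a *StoreParams value (storeParams below) is
// obtained elsewhere in this package and ch is an existing Chunk:
//
//	store, err := NewLDBStore(NewLDBStoreParams(storeParams, "/path/to/chunkdb"))
//	if err != nil {
//		// handle error
//	}
//	defer store.Close()
//
//	if err := store.Put(context.TODO(), ch); err != nil {
//		// handle error
//	}
//	retrieved, err := store.Get(context.TODO(), ch.Address())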

// MarkAccessed increments the access counter as a best effort for a chunk, so
// the chunk won't get garbage collected.
func (s *LDBStore) MarkAccessed(addr Address) {
	s.lock.Lock()
	defer s.lock.Unlock()

	if s.closed {
		return
	}

	proximity := s.po(addr)
	s.tryAccessIdx(addr, proximity)
}

// initialize and set values for processing of gc round
func (s *LDBStore) startGC(c int) {

	s.gc.count = 0
	// calculate the target number of deletions
	if c >= s.gc.maxRound {
		s.gc.target = s.gc.maxRound
	} else {
		s.gc.target = c / s.gc.ratio
	}
	s.gc.batch = newBatch()
	log.Debug("startgc", "requested", c, "target", s.gc.target)
}
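
// Worked example: with the defaults above (defaultGCRatio = 10,
// defaultMaxGCRound = 10000), a store holding 5000 chunks gets a round target of
// 5000/10 = 500 deletions, while any store holding 10000 chunks or more is
// capped at the maximum round size:
//
//	s.startGC(5000)  // s.gc.target == 500
//	s.startGC(50000) // s.gc.target == 10000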

// NewMockDbStore creates a new instance of DbStore with
// mockStore set to the provided value. If the mockStore argument is nil,
// this function behaves exactly as NewDbStore.
func NewMockDbStore(params *LDBStoreParams, mockStore *mock.NodeStore) (s *LDBStore, err error) {
	s, err = NewLDBStore(params)
	if err != nil {
		return nil, err
	}

	// replace put and get with mock store functionality
	if mockStore != nil {
		s.encodeDataFunc = newMockEncodeDataFunc(mockStore)
		s.getDataFunc = newMockGetDataFunc(mockStore)
	}
	return
}
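
// Minimal sketch for the mock-backed variant, assuming the in-memory global
// store under swarm/storage/mock/mem (mem.NewGlobalStore and its NewNodeStore
// method are assumptions, not part of this file):
//
//	globalStore := mem.NewGlobalStore()
//	nodeStore := globalStore.NewNodeStore(nodeAddr)
//	store, err := NewMockDbStore(params, nodeStore)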

type dpaDBIndex struct {
	Idx    uint64
	Access uint64
}

func BytesToU64(data []byte) uint64 {
	if len(data) < 8 {
		return 0
	}
	return binary.BigEndian.Uint64(data)
}

func U64ToBytes(val uint64) []byte {
	data := make([]byte, 8)
	binary.BigEndian.PutUint64(data, val)
	return data
}

func getIndexKey(hash Address) []byte {
	hashSize := len(hash)
	key := make([]byte, hashSize+1)
	key[0] = keyIndex
	copy(key[1:], hash[:])
	return key
}

func getDataKey(idx uint64, po uint8) []byte {
	key := make([]byte, 10)
	key[0] = keyData
	key[1] = po
	binary.BigEndian.PutUint64(key[2:], idx)

	return key
}

func getGCIdxKey(index *dpaDBIndex) []byte {
	key := make([]byte, 9)
	key[0] = keyGCIdx
	binary.BigEndian.PutUint64(key[1:], index.Access)
	return key
}

func getGCIdxValue(index *dpaDBIndex, po uint8, addr Address) []byte {
	val := make([]byte, 41) // po = 1, index.Idx = 8, Address = 32
	val[0] = po
	binary.BigEndian.PutUint64(val[1:], index.Idx)
	copy(val[9:], addr)
	return val
}
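
// For reference, the on-disk key and value layouts produced by the helpers
// above (a summary of the code, sizes in bytes):
//
//	index key:    keyIndex (1) || chunk address (32)           -> rlp(dpaDBIndex)
//	data key:     keyData (1)  || po (1) || dataIdx (8)        -> chunk address (32) || chunk data
//	gc index key: keyGCIdx (1) || access count (8, big endian) -> po (1) || dataIdx (8) || chunk address (32)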

func parseIdxKey(key []byte) (byte, []byte) {
	return key[0], key[1:]
}

func parseGCIdxEntry(accessCnt []byte, val []byte) (index *dpaDBIndex, po uint8, addr Address) {
	index = &dpaDBIndex{
		Idx:    binary.BigEndian.Uint64(val[1:]),
		Access: binary.BigEndian.Uint64(accessCnt),
	}
	po = val[0]
	addr = val[9:]
	return
}

func encodeIndex(index *dpaDBIndex) []byte {
	data, _ := rlp.EncodeToBytes(index)
	return data
}

func encodeData(chunk Chunk) []byte {
	// Always create a new underlying array for the returned byte slice.
	// The chunk.Address array may be used in the returned slice which
	// may be changed later in the code or in the LevelDB, resulting
	// in the Address being changed as well.
	return append(append([]byte{}, chunk.Address()[:]...), chunk.Data()...)
}

func decodeIndex(data []byte, index *dpaDBIndex) error {
	dec := rlp.NewStream(bytes.NewReader(data), 0)
	return dec.Decode(index)
}

func decodeData(addr Address, data []byte) (*chunk, error) {
	return NewChunk(addr, data[32:]), nil
}

func (s *LDBStore) collectGarbage() error {

	// prevent duplicate gc from starting when one is already running
	select {
	case <-s.gc.runC:
	default:
		return nil
	}

	s.lock.Lock()
	entryCnt := s.entryCnt
	s.lock.Unlock()

	metrics.GetOrRegisterCounter("ldbstore.collectgarbage", nil).Inc(1)

	// calculate the amount of chunks to collect and reset counter
	s.startGC(int(entryCnt))
	log.Debug("collectGarbage", "target", s.gc.target, "entryCnt", entryCnt)

	var totalDeleted int
	for s.gc.count < s.gc.target {
		it := s.db.NewIterator()
		ok := it.Seek([]byte{keyGCIdx})
		var singleIterationCount int

		// every batch needs a lock so that entries cannot change their access index in the meantime
		s.lock.Lock()
		for ; ok && (singleIterationCount < s.gc.maxBatch); ok = it.Next() {

			// quit if no more access index keys
			itkey := it.Key()
			if (itkey == nil) || (itkey[0] != keyGCIdx) {
				break
			}

			// get chunk data entry from access index
			val := it.Value()
			index, po, hash := parseGCIdxEntry(itkey[1:], val)
			keyIdx := make([]byte, 33)
			keyIdx[0] = keyIndex
			copy(keyIdx[1:], hash)

			// add delete operation to batch
			s.delete(s.gc.batch.Batch, index, keyIdx, po)
			singleIterationCount++
			s.gc.count++
			log.Trace("garbage collect enqueued chunk for deletion", "key", hash)

			// break if target is not on max garbage batch boundary
			if s.gc.count >= s.gc.target {
				break
			}
		}

		s.writeBatch(s.gc.batch, wEntryCnt)
		totalDeleted += singleIterationCount
		s.lock.Unlock()
		it.Release()
		log.Trace("garbage collect batch done", "batch", singleIterationCount, "total", s.gc.count)
	}

	s.gc.runC <- struct{}{}
	log.Debug("garbage collect done", "c", s.gc.count)

	metrics.GetOrRegisterCounter("ldbstore.collectgarbage.delete", nil).Inc(int64(totalDeleted))
	return nil
}

// Export writes all chunks from the store to a tar archive, returning the
// number of chunks written.
func (s *LDBStore) Export(out io.Writer) (int64, error) {
	tw := tar.NewWriter(out)
	defer tw.Close()

	it := s.db.NewIterator()
	defer it.Release()
	var count int64
	for ok := it.Seek([]byte{keyIndex}); ok; ok = it.Next() {
		key := it.Key()
		if (key == nil) || (key[0] != keyIndex) {
			break
		}

		var index dpaDBIndex

		hash := key[1:]
		decodeIndex(it.Value(), &index)
		po := s.po(hash)
		datakey := getDataKey(index.Idx, po)
		log.Trace("store.export", "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po)
		data, err := s.db.Get(datakey)
		if err != nil {
			log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed: %v", key, err))
			continue
		}

		hdr := &tar.Header{
			Name: hex.EncodeToString(hash),
			Mode: 0644,
			Size: int64(len(data)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return count, err
		}
		if _, err := tw.Write(data); err != nil {
			return count, err
		}
		count++
	}

	return count, nil
}
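
// Minimal sketch of exporting the store to a tar archive on disk (the file
// name and the os import are illustrative, not used elsewhere in this file):
//
//	f, err := os.Create("chunks.tar")
//	if err != nil {
//		// handle error
//	}
//	defer f.Close()
//	n, err := store.Export(f)
//	log.Info("exported chunks", "count", n, "err", err)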

// Import reads chunks into the store from a tar archive, returning
// the number of chunks read.
func (s *LDBStore) Import(in io.Reader) (int64, error) {
	tr := tar.NewReader(in)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	countC := make(chan int64)
	errC := make(chan error)
	var count int64
	go func() {
		for {
			hdr, err := tr.Next()
			if err == io.EOF {
				break
			} else if err != nil {
				select {
				case errC <- err:
				case <-ctx.Done():
				}
			}

			if len(hdr.Name) != 64 {
				log.Warn("ignoring non-chunk file", "name", hdr.Name)
				continue
			}

			keybytes, err := hex.DecodeString(hdr.Name)
			if err != nil {
				log.Warn("ignoring invalid chunk file", "name", hdr.Name, "err", err)
				continue
			}

			data, err := ioutil.ReadAll(tr)
			if err != nil {
				select {
				case errC <- err:
				case <-ctx.Done():
				}
			}
			key := Address(keybytes)
			chunk := NewChunk(key, data[32:])

			go func() {
				select {
				case errC <- s.Put(ctx, chunk):
				case <-ctx.Done():
				}
			}()

			count++
		}
		countC <- count
	}()

	// wait for all chunks to be stored
	i := int64(0)
	var total int64
	for {
		select {
		case err := <-errC:
			if err != nil {
				return count, err
			}
			i++
		case total = <-countC:
		case <-ctx.Done():
			return i, ctx.Err()
		}
		if total > 0 && i == total {
			return total, nil
		}
	}
}
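
// Matching sketch for the import direction, reading the archive written by
// Export (file name and os import are illustrative):
//
//	f, err := os.Open("chunks.tar")
//	if err != nil {
//		// handle error
//	}
//	defer f.Close()
//	n, err := store.Import(f)
//	log.Info("imported chunks", "count", n, "err", err)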

// Cleanup iterates over the database and deletes chunks if they pass the `f` condition
func (s *LDBStore) Cleanup(f func(*chunk) bool) {
	var errorsFound, removed, total int

	it := s.db.NewIterator()
	defer it.Release()
	for ok := it.Seek([]byte{keyIndex}); ok; ok = it.Next() {
		key := it.Key()
		if (key == nil) || (key[0] != keyIndex) {
			break
		}
		total++
		var index dpaDBIndex
		err := decodeIndex(it.Value(), &index)
		if err != nil {
			log.Warn("Cannot decode")
			errorsFound++
			continue
		}
		hash := key[1:]
		po := s.po(hash)
		datakey := getDataKey(index.Idx, po)
		data, err := s.db.Get(datakey)
		if err != nil {
			found := false

			// highest possible proximity is 255
			for po = 1; po <= 255; po++ {
				datakey = getDataKey(index.Idx, po)
				data, err = s.db.Get(datakey)
				if err == nil {
					found = true
					break
				}
			}

			if !found {
				log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed with any po", key))
				errorsFound++
				continue
			}
		}

		ck := data[:32]
		c, err := decodeData(ck, data)
		if err != nil {
			log.Error("decodeData error", "err", err)
			continue
		}

		cs := int64(binary.LittleEndian.Uint64(c.sdata[:8]))
		log.Trace("chunk", "key", fmt.Sprintf("%x", key), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.sdata), "size", cs)

		// if chunk is to be removed
		if f(c) {
			log.Warn("chunk for cleanup", "key", fmt.Sprintf("%x", key), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.sdata), "size", cs)
			s.deleteNow(&index, getIndexKey(key[1:]), po)
			removed++
			errorsFound++
		}
	}

	log.Warn(fmt.Sprintf("Found %v errors out of %v entries. Removed %v chunks.", errorsFound, total, removed))
}
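
// Example predicate for Cleanup (a sketch; the 4096-byte payload limit plus the
// 8-byte length prefix is an assumption about the maximum chunk size):
//
//	store.Cleanup(func(c *chunk) bool {
//		// remove chunks whose stored payload exceeds the expected maximum
//		return len(c.sdata) > 4096+8
//	})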

// CleanGCIndex rebuilds the garbage collector index from scratch, while
// removing inconsistent elements, e.g., indices with missing data chunks.
// WARN: it's a pretty heavy, long running function.
func (s *LDBStore) CleanGCIndex() error {
	s.lock.Lock()
	defer s.lock.Unlock()

	batch := leveldb.Batch{}

	var okEntryCount uint64
	var totalEntryCount uint64

	// throw out all gc indices, we will rebuild from the cleaned index
	it := s.db.NewIterator()
	it.Seek([]byte{keyGCIdx})
	var gcDeletes int
	for it.Valid() {
		rowType, _ := parseIdxKey(it.Key())
		if rowType != keyGCIdx {
			break
		}
		batch.Delete(it.Key())
		gcDeletes++
		it.Next()
	}
	log.Debug("gc", "deletes", gcDeletes)
	if err := s.db.Write(&batch); err != nil {
		return err
	}
	batch.Reset()

	it.Release()

	// corrected po index pointer values
	var poPtrs [256]uint64

	// set to true if chunk count is not on a 4096 iteration boundary
	var doneIterating bool

	// last key index from the previous iteration
	lastIdxKey := []byte{keyIndex}

	// counter for debug output
	var cleanBatchCount int

	// go through all key index entries
	for !doneIterating {
		cleanBatchCount++
		var idxs []dpaDBIndex
		var chunkHashes [][]byte
		var pos []uint8
		it := s.db.NewIterator()

		it.Seek(lastIdxKey)

		// 4096 is just a nice number, don't look for any hidden meaning here...
		var i int
		for i = 0; i < 4096; i++ {

			// this really shouldn't happen unless the database is empty,
			// but let's keep it to be safe
			if !it.Valid() {
				doneIterating = true
				break
			}

			// if it's not a keyIndex row anymore, we're done iterating
			rowType, chunkHash := parseIdxKey(it.Key())
			if rowType != keyIndex {
				doneIterating = true
				break
			}

			// decode the retrieved index
			var idx dpaDBIndex
			err := decodeIndex(it.Value(), &idx)
			if err != nil {
				return fmt.Errorf("corrupt index: %v", err)
			}
			po := s.po(chunkHash)
			lastIdxKey = it.Key()

			// if we don't find the data key, remove the entry
			// if we find it, add it to the array of new gc indices to create
			dataKey := getDataKey(idx.Idx, po)
			_, err = s.db.Get(dataKey)
			if err != nil {
				log.Warn("deleting inconsistent index (missing data)", "key", chunkHash)
				batch.Delete(it.Key())
			} else {
				idxs = append(idxs, idx)
				chunkHashes = append(chunkHashes, chunkHash)
				pos = append(pos, po)
				okEntryCount++
				if idx.Idx > poPtrs[po] {
					poPtrs[po] = idx.Idx
				}
			}
			totalEntryCount++
			it.Next()
		}
		it.Release()

		// flush the key index corrections
		err := s.db.Write(&batch)
		if err != nil {
			return err
		}
		batch.Reset()

		// add correct gc indices
		for i, okIdx := range idxs {
			gcIdxKey := getGCIdxKey(&okIdx)
			gcIdxData := getGCIdxValue(&okIdx, pos[i], chunkHashes[i])
			batch.Put(gcIdxKey, gcIdxData)
			log.Trace("clean ok", "key", chunkHashes[i], "gcKey", gcIdxKey, "gcData", gcIdxData)
		}

		// and flush them
		err = s.db.Write(&batch)
		if err != nil {
			return err
		}
		batch.Reset()

		log.Debug("clean gc index pass", "batch", cleanBatchCount, "checked", i, "kept", len(idxs))
	}

	log.Debug("gc cleanup entries", "ok", okEntryCount, "total", totalEntryCount, "batchlen", batch.Len())

	// lastly add the updated entry count
	var entryCount [8]byte
	binary.BigEndian.PutUint64(entryCount[:], okEntryCount)
	batch.Put(keyEntryCnt, entryCount[:])

	// and add the new po index pointers
	var poKey [2]byte
	poKey[0] = keyDistanceCnt
	for i, poPtr := range poPtrs {
		poKey[1] = uint8(i)
		if poPtr == 0 {
			batch.Delete(poKey[:])
		} else {
			var idxCount [8]byte
			binary.BigEndian.PutUint64(idxCount[:], poPtr)
			batch.Put(poKey[:], idxCount[:])
		}
	}

	// if you made it this far your harddisk has survived. Congratulations
	return s.db.Write(&batch)
}

// Delete removes a chunk and updates indices.
// Is thread safe
func (s *LDBStore) Delete(addr Address) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	ikey := getIndexKey(addr)

	idata, err := s.db.Get(ikey)
	if err != nil {
		return err
	}

	var idx dpaDBIndex
	decodeIndex(idata, &idx)
	proximity := s.po(addr)
	return s.deleteNow(&idx, ikey, proximity)
}

// executes one delete operation immediately
// see *LDBStore.delete
func (s *LDBStore) deleteNow(idx *dpaDBIndex, idxKey []byte, po uint8) error {
	batch := new(leveldb.Batch)
	s.delete(batch, idx, idxKey, po)
	return s.db.Write(batch)
}

// adds a delete chunk operation to the provided batch
// if called directly, it decrements entryCnt regardless of whether the chunk exists upon deletion. Risk of wrap to max uint64
func (s *LDBStore) delete(batch *leveldb.Batch, idx *dpaDBIndex, idxKey []byte, po uint8) {
	metrics.GetOrRegisterCounter("ldbstore.delete", nil).Inc(1)

	gcIdxKey := getGCIdxKey(idx)
	batch.Delete(gcIdxKey)
	dataKey := getDataKey(idx.Idx, po)
	batch.Delete(dataKey)
	batch.Delete(idxKey)
	s.entryCnt--
	dbEntryCount.Dec(1)
	cntKey := make([]byte, 2)
	cntKey[0] = keyDistanceCnt
	cntKey[1] = po
	batch.Put(keyEntryCnt, U64ToBytes(s.entryCnt))
	batch.Put(cntKey, U64ToBytes(s.bucketCnt[po]))
}

func (s *LDBStore) BinIndex(po uint8) uint64 {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.bucketCnt[po]
}

// Put adds a chunk to the database, adding indices and incrementing global counters.
// If it already exists, it merely increments the access count of the existing entry.
// Is thread safe
func (s *LDBStore) Put(ctx context.Context, chunk Chunk) error {
	metrics.GetOrRegisterCounter("ldbstore.put", nil).Inc(1)
	log.Trace("ldbstore.put", "key", chunk.Address())

	ikey := getIndexKey(chunk.Address())
	var index dpaDBIndex

	po := s.po(chunk.Address())

	s.lock.Lock()

	if s.closed {
		s.lock.Unlock()
		return ErrDBClosed
	}
	batch := s.batch

	log.Trace("ldbstore.put: s.db.Get", "key", chunk.Address(), "ikey", fmt.Sprintf("%x", ikey))
	_, err := s.db.Get(ikey)
	if err != nil {
		s.doPut(chunk, &index, po)
	}
	idata := encodeIndex(&index)
	s.batch.Put(ikey, idata)

	// add the access-chunkindex index for garbage collection
	gcIdxKey := getGCIdxKey(&index)
	gcIdxData := getGCIdxValue(&index, po, chunk.Address())
	s.batch.Put(gcIdxKey, gcIdxData)
	s.lock.Unlock()

	select {
	case s.batchesC <- struct{}{}:
	default:
	}

	select {
	case <-batch.c:
		return batch.err
	case <-ctx.Done():
		return ctx.Err()
	}
}
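
// Put only stages the chunk in the shared in-memory batch; the actual write
// happens in the writeBatches goroutine, and the call blocks on batch.c until
// that batch is flushed (or the context is done). A caller-side sketch with
// cancellation (variable names are illustrative):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	if err := store.Put(ctx, ch); err != nil {
//		// either the batch write failed or ctx was cancelled first
//	}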

// force putting into db, does not check or update necessary indices
func (s *LDBStore) doPut(chunk Chunk, index *dpaDBIndex, po uint8) {
	data := s.encodeDataFunc(chunk)
	dkey := getDataKey(s.dataIdx, po)
	s.batch.Put(dkey, data)
	index.Idx = s.dataIdx
	s.bucketCnt[po] = s.dataIdx
	s.entryCnt++
	dbEntryCount.Inc(1)
	s.dataIdx++
	index.Access = s.accessCnt
	s.accessCnt++
	cntKey := make([]byte, 2)
	cntKey[0] = keyDistanceCnt
	cntKey[1] = po
	s.batch.Put(cntKey, U64ToBytes(s.bucketCnt[po]))
}

func (s *LDBStore) writeBatches() {
	for {
		select {
		case <-s.quit:
			log.Debug("DbStore: quit batch write loop")
			return
		case <-s.batchesC:
			err := s.writeCurrentBatch()
			if err != nil {
				log.Debug("DbStore: quit batch write loop", "err", err.Error())
				return
			}
		}
	}

}

func (s *LDBStore) writeCurrentBatch() error {
	s.lock.Lock()
	defer s.lock.Unlock()
	b := s.batch
	l := b.Len()
	if l == 0 {
		return nil
	}
	s.batch = newBatch()
	b.err = s.writeBatch(b, wEntryCnt|wAccessCnt|wIndexCnt)
	close(b.c)
	if s.entryCnt >= s.capacity {
		go s.collectGarbage()
	}
	return nil
}

// must be called non concurrently
func (s *LDBStore) writeBatch(b *dbBatch, wFlag uint8) error {
	if wFlag&wEntryCnt > 0 {
		b.Put(keyEntryCnt, U64ToBytes(s.entryCnt))
	}
	if wFlag&wIndexCnt > 0 {
		b.Put(keyDataIdx, U64ToBytes(s.dataIdx))
	}
	if wFlag&wAccessCnt > 0 {
		b.Put(keyAccessCnt, U64ToBytes(s.accessCnt))
	}
	l := b.Len()
	if err := s.db.Write(b.Batch); err != nil {
		return fmt.Errorf("unable to write batch: %v", err)
	}
	log.Trace(fmt.Sprintf("batch write (%d entries)", l))
	return nil
}

// newMockEncodeDataFunc returns a function that stores the chunk data
// to a mock store to bypass the default functionality of encodeData.
// The constructed function returns only the chunk address as the stored data,
// as DbStore does not need to store the payload itself, but still needs to
// create the index.
func newMockEncodeDataFunc(mockStore *mock.NodeStore) func(chunk Chunk) []byte {
	return func(chunk Chunk) []byte {
		if err := mockStore.Put(chunk.Address(), encodeData(chunk)); err != nil {
			log.Error(fmt.Sprintf("%T: Chunk %v put: %v", mockStore, chunk.Address().Log(), err))
		}
		return chunk.Address()[:]
	}
}

// tryAccessIdx tries to find the index entry. If found, it increments the access
// count for garbage collection and returns the index entry and true,
// otherwise it returns nil and false.
func (s *LDBStore) tryAccessIdx(addr Address, po uint8) (*dpaDBIndex, bool) {
	ikey := getIndexKey(addr)
	idata, err := s.db.Get(ikey)
	if err != nil {
		return nil, false
	}

	index := new(dpaDBIndex)
	decodeIndex(idata, index)
	oldGCIdxKey := getGCIdxKey(index)
	s.batch.Put(keyAccessCnt, U64ToBytes(s.accessCnt))
	index.Access = s.accessCnt
	idata = encodeIndex(index)
	s.accessCnt++
	s.batch.Put(ikey, idata)
	newGCIdxKey := getGCIdxKey(index)
	newGCIdxData := getGCIdxValue(index, po, ikey[1:])
	s.batch.Delete(oldGCIdxKey)
	s.batch.Put(newGCIdxKey, newGCIdxData)
	select {
	case s.batchesC <- struct{}{}:
	default:
	}
	return index, true
}

// GetSchema returns the current named schema of the datastore as read from LevelDB.
func (s *LDBStore) GetSchema() (string, error) {
	s.lock.Lock()
	defer s.lock.Unlock()

	data, err := s.db.Get(keySchema)
	if err != nil {
		if err == leveldb.ErrNotFound {
			return DbSchemaNone, nil
		}
		return "", err
	}

	return string(data), nil
}

// PutSchema saves a named schema to the LevelDB datastore.
func (s *LDBStore) PutSchema(schema string) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	return s.db.Put(keySchema, []byte(schema))
}
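
// Typical migration-style usage of the schema accessors (a sketch; the
// "current" schema name is a placeholder, DbSchemaNone is the only schema
// constant referenced in this file):
//
//	schema, err := store.GetSchema()
//	if err != nil {
//		// handle error
//	}
//	if schema == DbSchemaNone {
//		if err := store.PutSchema("current"); err != nil {
//			// handle error
//		}
//	}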

// Get retrieves the chunk matching the provided key from the database.
// If the chunk entry does not exist, it returns an error.
// Updates access count and is thread safe
func (s *LDBStore) Get(_ context.Context, addr Address) (chunk Chunk, err error) {
	metrics.GetOrRegisterCounter("ldbstore.get", nil).Inc(1)
	log.Trace("ldbstore.get", "key", addr)

	s.lock.Lock()
	defer s.lock.Unlock()
	return s.get(addr)
}

// TODO: To conform with the other private methods of this object, indices should not be updated
func (s *LDBStore) get(addr Address) (chunk *chunk, err error) {
	if s.closed {
		return nil, ErrDBClosed
	}
	proximity := s.po(addr)
	index, found := s.tryAccessIdx(addr, proximity)
	if found {
		var data []byte
		if s.getDataFunc != nil {
			// if getDataFunc is defined, use it to retrieve the chunk data
			log.Trace("ldbstore.get retrieve with getDataFunc", "key", addr)
			data, err = s.getDataFunc(addr)
			if err != nil {
				return
			}
		} else {
			// default DbStore functionality to retrieve chunk data
			datakey := getDataKey(index.Idx, proximity)
			data, err = s.db.Get(datakey)
			log.Trace("ldbstore.get retrieve", "key", addr, "indexkey", index.Idx, "datakey", fmt.Sprintf("%x", datakey), "proximity", proximity)
			if err != nil {
				log.Trace("ldbstore.get chunk found but could not be accessed", "key", addr, "err", err)
				s.deleteNow(index, getIndexKey(addr), s.po(addr))
				return
			}
		}

		return decodeData(addr, data)
	} else {
		err = ErrChunkNotFound
	}

	return
}

// newMockGetDataFunc returns a function that reads chunk data from
// the mock database, which is used as the value for DbStore.getFunc
// to bypass the default functionality of DbStore with a mock store.
func newMockGetDataFunc(mockStore *mock.NodeStore) func(addr Address) (data []byte, err error) {
	return func(addr Address) (data []byte, err error) {
		data, err = mockStore.Get(addr)
		if err == mock.ErrNotFound {
			// preserve ErrChunkNotFound error
			err = ErrChunkNotFound
		}
		return data, err
	}
}

func (s *LDBStore) setCapacity(c uint64) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.capacity = c

	for s.entryCnt > c {
		s.collectGarbage()
	}
}

func (s *LDBStore) Close() {
	close(s.quit)
	s.lock.Lock()
	s.closed = true
	s.lock.Unlock()
	// force writing out the current batch
	s.writeCurrentBatch()
	close(s.batchesC)
	s.db.Close()
}

// SyncIterator(start, stop, po, f) calls f on each hash of a bin po from start to stop
func (s *LDBStore) SyncIterator(since uint64, until uint64, po uint8, f func(Address, uint64) bool) error {
	metrics.GetOrRegisterCounter("ldbstore.synciterator", nil).Inc(1)

	sincekey := getDataKey(since, po)
	untilkey := getDataKey(until, po)
	it := s.db.NewIterator()
	defer it.Release()

	for ok := it.Seek(sincekey); ok; ok = it.Next() {
		metrics.GetOrRegisterCounter("ldbstore.synciterator.seek", nil).Inc(1)

		dbkey := it.Key()
		if dbkey[0] != keyData || dbkey[1] != po || bytes.Compare(untilkey, dbkey) < 0 {
			break
		}
		key := make([]byte, 32)
		val := it.Value()
		copy(key, val[:32])
		if !f(Address(key), binary.BigEndian.Uint64(dbkey[2:])) {
			break
		}
	}
	return it.Error()
}
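
// Usage sketch: iterate all chunks of proximity-order bin 2 stored so far,
// from data index 0 up to the bin's current index pointer:
//
//	po := uint8(2)
//	err := store.SyncIterator(0, store.BinIndex(po), po, func(addr Address, idx uint64) bool {
//		log.Trace("sync chunk", "addr", addr, "idx", idx)
//		return true // keep iterating
//	})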