github.com/jimmyx0x/go-ethereum@v1.10.28/core/rawdb/freezer_batch.go (about)

     1  // Copyright 2021 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package rawdb
    18  
    19  import (
    20  	"fmt"
    21  	"sync/atomic"
    22  
    23  	"github.com/ethereum/go-ethereum/common/math"
    24  	"github.com/ethereum/go-ethereum/rlp"
    25  	"github.com/golang/snappy"
    26  )
    27  
// This is the maximum amount of data that will be buffered in memory
// for a single freezer table batch. (2 MiB; once a table batch's data
// buffer exceeds this, maybeCommit flushes it to disk.)
const freezerBatchBufferLimit = 2 * 1024 * 1024
    31  
// freezerBatch is a write operation of multiple items on a freezer.
type freezerBatch struct {
	tables map[string]*freezerTableBatch // one sub-batch per freezer table, keyed by table kind
}
    36  
    37  func newFreezerBatch(f *Freezer) *freezerBatch {
    38  	batch := &freezerBatch{tables: make(map[string]*freezerTableBatch, len(f.tables))}
    39  	for kind, table := range f.tables {
    40  		batch.tables[kind] = table.newBatch()
    41  	}
    42  	return batch
    43  }
    44  
    45  // Append adds an RLP-encoded item of the given kind.
    46  func (batch *freezerBatch) Append(kind string, num uint64, item interface{}) error {
    47  	return batch.tables[kind].Append(num, item)
    48  }
    49  
    50  // AppendRaw adds an item of the given kind.
    51  func (batch *freezerBatch) AppendRaw(kind string, num uint64, item []byte) error {
    52  	return batch.tables[kind].AppendRaw(num, item)
    53  }
    54  
    55  // reset initializes the batch.
    56  func (batch *freezerBatch) reset() {
    57  	for _, tb := range batch.tables {
    58  		tb.reset()
    59  	}
    60  }
    61  
    62  // commit is called at the end of a write operation and
    63  // writes all remaining data to tables.
    64  func (batch *freezerBatch) commit() (item uint64, writeSize int64, err error) {
    65  	// Check that count agrees on all batches.
    66  	item = uint64(math.MaxUint64)
    67  	for name, tb := range batch.tables {
    68  		if item < math.MaxUint64 && tb.curItem != item {
    69  			return 0, 0, fmt.Errorf("table %s is at item %d, want %d", name, tb.curItem, item)
    70  		}
    71  		item = tb.curItem
    72  	}
    73  
    74  	// Commit all table batches.
    75  	for _, tb := range batch.tables {
    76  		if err := tb.commit(); err != nil {
    77  			return 0, 0, err
    78  		}
    79  		writeSize += tb.totalBytes
    80  	}
    81  	return item, writeSize, nil
    82  }
    83  
// freezerTableBatch is a batch for a freezer table.
type freezerTableBatch struct {
	t *freezerTable // backing table the batch writes into

	sb          *snappyBuffer // reusable snappy scratch buffer; nil when the table is uncompressed
	encBuffer   writeBuffer   // reusable buffer for RLP-encoding appended items
	dataBuffer  []byte        // buffered (possibly compressed) item payloads awaiting commit
	indexBuffer []byte        // buffered index entries awaiting commit
	curItem     uint64        // expected index of next append
	totalBytes  int64         // counts written bytes since reset
}
    95  
    96  // newBatch creates a new batch for the freezer table.
    97  func (t *freezerTable) newBatch() *freezerTableBatch {
    98  	batch := &freezerTableBatch{t: t}
    99  	if !t.noCompression {
   100  		batch.sb = new(snappyBuffer)
   101  	}
   102  	batch.reset()
   103  	return batch
   104  }
   105  
   106  // reset clears the batch for reuse.
   107  func (batch *freezerTableBatch) reset() {
   108  	batch.dataBuffer = batch.dataBuffer[:0]
   109  	batch.indexBuffer = batch.indexBuffer[:0]
   110  	batch.curItem = atomic.LoadUint64(&batch.t.items)
   111  	batch.totalBytes = 0
   112  }
   113  
   114  // Append rlp-encodes and adds data at the end of the freezer table. The item number is a
   115  // precautionary parameter to ensure data correctness, but the table will reject already
   116  // existing data.
   117  func (batch *freezerTableBatch) Append(item uint64, data interface{}) error {
   118  	if item != batch.curItem {
   119  		return fmt.Errorf("%w: have %d want %d", errOutOrderInsertion, item, batch.curItem)
   120  	}
   121  
   122  	// Encode the item.
   123  	batch.encBuffer.Reset()
   124  	if err := rlp.Encode(&batch.encBuffer, data); err != nil {
   125  		return err
   126  	}
   127  	encItem := batch.encBuffer.data
   128  	if batch.sb != nil {
   129  		encItem = batch.sb.compress(encItem)
   130  	}
   131  	return batch.appendItem(encItem)
   132  }
   133  
   134  // AppendRaw injects a binary blob at the end of the freezer table. The item number is a
   135  // precautionary parameter to ensure data correctness, but the table will reject already
   136  // existing data.
   137  func (batch *freezerTableBatch) AppendRaw(item uint64, blob []byte) error {
   138  	if item != batch.curItem {
   139  		return fmt.Errorf("%w: have %d want %d", errOutOrderInsertion, item, batch.curItem)
   140  	}
   141  
   142  	encItem := blob
   143  	if batch.sb != nil {
   144  		encItem = batch.sb.compress(blob)
   145  	}
   146  	return batch.appendItem(encItem)
   147  }
   148  
   149  func (batch *freezerTableBatch) appendItem(data []byte) error {
   150  	// Check if item fits into current data file.
   151  	itemSize := int64(len(data))
   152  	itemOffset := batch.t.headBytes + int64(len(batch.dataBuffer))
   153  	if itemOffset+itemSize > int64(batch.t.maxFileSize) {
   154  		// It doesn't fit, go to next file first.
   155  		if err := batch.commit(); err != nil {
   156  			return err
   157  		}
   158  		if err := batch.t.advanceHead(); err != nil {
   159  			return err
   160  		}
   161  		itemOffset = 0
   162  	}
   163  
   164  	// Put data to buffer.
   165  	batch.dataBuffer = append(batch.dataBuffer, data...)
   166  	batch.totalBytes += itemSize
   167  
   168  	// Put index entry to buffer.
   169  	entry := indexEntry{filenum: batch.t.headId, offset: uint32(itemOffset + itemSize)}
   170  	batch.indexBuffer = entry.append(batch.indexBuffer)
   171  	batch.curItem++
   172  
   173  	return batch.maybeCommit()
   174  }
   175  
   176  // maybeCommit writes the buffered data if the buffer is full enough.
   177  func (batch *freezerTableBatch) maybeCommit() error {
   178  	if len(batch.dataBuffer) > freezerBatchBufferLimit {
   179  		return batch.commit()
   180  	}
   181  	return nil
   182  }
   183  
   184  // commit writes the batched items to the backing freezerTable.
   185  func (batch *freezerTableBatch) commit() error {
   186  	// Write data.
   187  	_, err := batch.t.head.Write(batch.dataBuffer)
   188  	if err != nil {
   189  		return err
   190  	}
   191  	dataSize := int64(len(batch.dataBuffer))
   192  	batch.dataBuffer = batch.dataBuffer[:0]
   193  
   194  	// Write indices.
   195  	_, err = batch.t.index.Write(batch.indexBuffer)
   196  	if err != nil {
   197  		return err
   198  	}
   199  	indexSize := int64(len(batch.indexBuffer))
   200  	batch.indexBuffer = batch.indexBuffer[:0]
   201  
   202  	// Update headBytes of table.
   203  	batch.t.headBytes += dataSize
   204  	atomic.StoreUint64(&batch.t.items, batch.curItem)
   205  
   206  	// Update metrics.
   207  	batch.t.sizeGauge.Inc(dataSize + indexSize)
   208  	batch.t.writeMeter.Mark(dataSize + indexSize)
   209  	return nil
   210  }
   211  
   212  // snappyBuffer writes snappy in block format, and can be reused. It is
   213  // reset when WriteTo is called.
   214  type snappyBuffer struct {
   215  	dst []byte
   216  }
   217  
   218  // compress snappy-compresses the data.
   219  func (s *snappyBuffer) compress(data []byte) []byte {
   220  	// The snappy library does not care what the capacity of the buffer is,
   221  	// but only checks the length. If the length is too small, it will
   222  	// allocate a brand new buffer.
   223  	// To avoid that, we check the required size here, and grow the size of the
   224  	// buffer to utilize the full capacity.
   225  	if n := snappy.MaxEncodedLen(len(data)); len(s.dst) < n {
   226  		if cap(s.dst) < n {
   227  			s.dst = make([]byte, n)
   228  		}
   229  		s.dst = s.dst[:n]
   230  	}
   231  
   232  	s.dst = snappy.Encode(s.dst, data)
   233  	return s.dst
   234  }
   235  
   236  // writeBuffer implements io.Writer for a byte slice.
   237  type writeBuffer struct {
   238  	data []byte
   239  }
   240  
   241  func (wb *writeBuffer) Write(data []byte) (int, error) {
   242  	wb.data = append(wb.data, data...)
   243  	return len(data), nil
   244  }
   245  
   246  func (wb *writeBuffer) Reset() {
   247  	wb.data = wb.data[:0]
   248  }