github.com/cryptotooltop/go-ethereum@v0.0.0-20231103184714-151d1922f3e5/core/rawdb/freezer_batch.go (about) 1 // Copyright 2021 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package rawdb 18 19 import ( 20 "fmt" 21 "sync/atomic" 22 23 "github.com/golang/snappy" 24 25 "github.com/scroll-tech/go-ethereum/common/math" 26 "github.com/scroll-tech/go-ethereum/rlp" 27 ) 28 29 // This is the maximum amount of data that will be buffered in memory 30 // for a single freezer table batch. 31 const freezerBatchBufferLimit = 2 * 1024 * 1024 32 33 // freezerBatch is a write operation of multiple items on a freezer. 34 type freezerBatch struct { 35 tables map[string]*freezerTableBatch 36 } 37 38 func newFreezerBatch(f *freezer) *freezerBatch { 39 batch := &freezerBatch{tables: make(map[string]*freezerTableBatch, len(f.tables))} 40 for kind, table := range f.tables { 41 batch.tables[kind] = table.newBatch() 42 } 43 return batch 44 } 45 46 // Append adds an RLP-encoded item of the given kind. 47 func (batch *freezerBatch) Append(kind string, num uint64, item interface{}) error { 48 return batch.tables[kind].Append(num, item) 49 } 50 51 // AppendRaw adds an item of the given kind. 
52 func (batch *freezerBatch) AppendRaw(kind string, num uint64, item []byte) error { 53 return batch.tables[kind].AppendRaw(num, item) 54 } 55 56 // reset initializes the batch. 57 func (batch *freezerBatch) reset() { 58 for _, tb := range batch.tables { 59 tb.reset() 60 } 61 } 62 63 // commit is called at the end of a write operation and 64 // writes all remaining data to tables. 65 func (batch *freezerBatch) commit() (item uint64, writeSize int64, err error) { 66 // Check that count agrees on all batches. 67 item = uint64(math.MaxUint64) 68 for name, tb := range batch.tables { 69 if item < math.MaxUint64 && tb.curItem != item { 70 return 0, 0, fmt.Errorf("table %s is at item %d, want %d", name, tb.curItem, item) 71 } 72 item = tb.curItem 73 } 74 75 // Commit all table batches. 76 for _, tb := range batch.tables { 77 if err := tb.commit(); err != nil { 78 return 0, 0, err 79 } 80 writeSize += tb.totalBytes 81 } 82 return item, writeSize, nil 83 } 84 85 // freezerTableBatch is a batch for a freezer table. 86 type freezerTableBatch struct { 87 t *freezerTable 88 89 sb *snappyBuffer 90 encBuffer writeBuffer 91 dataBuffer []byte 92 indexBuffer []byte 93 curItem uint64 // expected index of next append 94 totalBytes int64 // counts written bytes since reset 95 } 96 97 // newBatch creates a new batch for the freezer table. 98 func (t *freezerTable) newBatch() *freezerTableBatch { 99 batch := &freezerTableBatch{t: t} 100 if !t.noCompression { 101 batch.sb = new(snappyBuffer) 102 } 103 batch.reset() 104 return batch 105 } 106 107 // reset clears the batch for reuse. 108 func (batch *freezerTableBatch) reset() { 109 batch.dataBuffer = batch.dataBuffer[:0] 110 batch.indexBuffer = batch.indexBuffer[:0] 111 batch.curItem = atomic.LoadUint64(&batch.t.items) 112 batch.totalBytes = 0 113 } 114 115 // Append rlp-encodes and adds data at the end of the freezer table. 
// Append rlp-encodes and adds data at the end of the freezer table. The item number is a
// precautionary parameter to ensure data correctness, but the table will reject already
// existing data.
func (batch *freezerTableBatch) Append(item uint64, data interface{}) error {
	if item != batch.curItem {
		return fmt.Errorf("%w: have %d want %d", errOutOrderInsertion, item, batch.curItem)
	}

	// Encode the item into the reusable write buffer, avoiding a fresh
	// allocation per append.
	batch.encBuffer.Reset()
	if err := rlp.Encode(&batch.encBuffer, data); err != nil {
		return err
	}
	encItem := batch.encBuffer.data
	if batch.sb != nil {
		// Compression is enabled for this table; sb reuses its own buffer too.
		encItem = batch.sb.compress(encItem)
	}
	return batch.appendItem(encItem)
}

// AppendRaw injects a binary blob at the end of the freezer table. The item number is a
// precautionary parameter to ensure data correctness, but the table will reject already
// existing data.
func (batch *freezerTableBatch) AppendRaw(item uint64, blob []byte) error {
	if item != batch.curItem {
		return fmt.Errorf("%w: have %d want %d", errOutOrderInsertion, item, batch.curItem)
	}

	encItem := blob
	if batch.sb != nil {
		encItem = batch.sb.compress(blob)
	}
	return batch.appendItem(encItem)
}

// appendItem buffers an already-encoded item together with its index entry,
// flushing when the item would cross the table's per-file size limit or
// (via maybeCommit) when the in-memory buffer exceeds freezerBatchBufferLimit.
func (batch *freezerTableBatch) appendItem(data []byte) error {
	// Check if item fits into current data file.
	itemSize := int64(len(data))
	itemOffset := batch.t.headBytes + int64(len(batch.dataBuffer))
	if itemOffset+itemSize > int64(batch.t.maxFileSize) {
		// It doesn't fit, go to next file first. commit is called before
		// advanceHead so the buffered data lands in the current head file,
		// which is what the buffered offsets were computed against.
		if err := batch.commit(); err != nil {
			return err
		}
		if err := batch.t.advanceHead(); err != nil {
			return err
		}
		itemOffset = 0
	}

	// Put data to buffer.
	batch.dataBuffer = append(batch.dataBuffer, data...)
	batch.totalBytes += itemSize

	// Put index entry to buffer. The recorded offset is the END of this item
	// within the head data file (itemOffset + itemSize).
	entry := indexEntry{filenum: batch.t.headId, offset: uint32(itemOffset + itemSize)}
	batch.indexBuffer = entry.append(batch.indexBuffer)
	batch.curItem++

	return batch.maybeCommit()
}

// maybeCommit writes the buffered data if the buffer is full enough.
func (batch *freezerTableBatch) maybeCommit() error {
	if len(batch.dataBuffer) > freezerBatchBufferLimit {
		return batch.commit()
	}
	return nil
}

// commit writes the batched items to the backing freezerTable. Note the
// ordering: the data file is written first, then the index file, and the
// table's item count is only stored (atomically) after both writes succeed.
func (batch *freezerTableBatch) commit() error {
	// Write data.
	_, err := batch.t.head.Write(batch.dataBuffer)
	if err != nil {
		return err
	}
	dataSize := int64(len(batch.dataBuffer))
	batch.dataBuffer = batch.dataBuffer[:0] // truncate, keep capacity for reuse

	// Write index.
	_, err = batch.t.index.Write(batch.indexBuffer)
	if err != nil {
		return err
	}
	indexSize := int64(len(batch.indexBuffer))
	batch.indexBuffer = batch.indexBuffer[:0]

	// Update headBytes of table and atomically publish the new item count.
	batch.t.headBytes += dataSize
	atomic.StoreUint64(&batch.t.items, batch.curItem)

	// Update metrics.
	batch.t.sizeGauge.Inc(dataSize + indexSize)
	batch.t.writeMeter.Mark(dataSize + indexSize)
	return nil
}

// snappyBuffer writes snappy in block format, and can be reused. It is
// reset when WriteTo is called.
// NOTE(review): no WriteTo method is visible in this file; only compress
// below reuses/overwrites dst — confirm the WriteTo reference elsewhere.
type snappyBuffer struct {
	dst []byte
}

// compress snappy-compresses the data.
func (s *snappyBuffer) compress(data []byte) []byte {
	// The snappy library does not care what the capacity of the buffer is,
	// but only checks the length. If the length is too small, it will
	// allocate a brand new buffer.
	// To avoid that, we check the required size here, and grow the size of the
	// buffer to utilize the full capacity.
	if n := snappy.MaxEncodedLen(len(data)); len(s.dst) < n {
		if cap(s.dst) < n {
			s.dst = make([]byte, n)
		}
		s.dst = s.dst[:n]
	}

	// Encode may return a slice of s.dst (shorter than n); remember it so the
	// grown capacity is reused on the next call.
	s.dst = snappy.Encode(s.dst, data)
	return s.dst
}

// writeBuffer implements io.Writer for a byte slice.
type writeBuffer struct {
	data []byte
}

// Write appends data to the buffer; it never returns an error.
func (wb *writeBuffer) Write(data []byte) (int, error) {
	wb.data = append(wb.data, data...)
	return len(data), nil
}

// Reset truncates the buffer, keeping its capacity for reuse.
func (wb *writeBuffer) Reset() {
	wb.data = wb.data[:0]
}