github.com/phillinzzz/newBsc@v1.1.6/core/rawdb/freezer_table.go

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package rawdb
    18  
    19  import (
    20  	"encoding/binary"
    21  	"errors"
    22  	"fmt"
    23  	"io"
    24  	"os"
    25  	"path/filepath"
    26  	"sync"
    27  	"sync/atomic"
    28  
    29  	"github.com/golang/snappy"
    30  	"github.com/phillinzzz/newBsc/common"
    31  	"github.com/phillinzzz/newBsc/log"
    32  	"github.com/phillinzzz/newBsc/metrics"
    33  )
    34  
    35  var (
    36  	// errClosed is returned if an operation attempts to read from or write to the
    37  	// freezer table after it has already been closed.
    38  	errClosed = errors.New("closed")
    39  
    40  	// errOutOfBounds is returned if the item requested is not contained within the
    41  	// freezer table.
    42  	errOutOfBounds = errors.New("out of bounds")
    43  
    44  	// errNotSupported is returned if the database doesn't support the required operation.
    45  	errNotSupported = errors.New("this operation is not supported")
    46  )
    47  
    48  // indexEntry contains the number/id of the file that the data resides in, as well as the
    49  // offset within the file to the end of the data.
    50  // In serialized form, the filenum is stored as uint16.
    51  type indexEntry struct {
    52  	filenum uint32 // stored as uint16 ( 2 bytes)
    53  	offset  uint32 // stored as uint32 ( 4 bytes)
    54  }
    55  
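        // On disk an indexEntry occupies indexEntrySize = 6 bytes: 2 bytes for the
        // file number and 4 bytes for the offset, both big-endian. For example,
        // {filenum: 5, offset: 300} marshals to 0x00 0x05 0x00 0x00 0x01 0x2c.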
    56  const indexEntrySize = 6
    57  
    58  // unmarshalBinary deserializes binary b into the index entry.
    59  func (i *indexEntry) unmarshalBinary(b []byte) error {
    60  	i.filenum = uint32(binary.BigEndian.Uint16(b[:2]))
    61  	i.offset = binary.BigEndian.Uint32(b[2:6])
    62  	return nil
    63  }
    64  
    65  // marshallBinary serializes the index entry into binary.
    66  func (i *indexEntry) marshallBinary() []byte {
    67  	b := make([]byte, indexEntrySize)
    68  	binary.BigEndian.PutUint16(b[:2], uint16(i.filenum))
    69  	binary.BigEndian.PutUint32(b[2:6], i.offset)
    70  	return b
    71  }
    72  
    73  // freezerTable represents a single chained data table within the freezer (e.g. blocks).
    74  // It consists of a data file (snappy encoded arbitrary data blobs) and an indexEntry
    75  // file (uncompressed 48-bit indices into the data file).
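        // The first entry of the index file stores the number of the earliest data
        // file and the count of items deleted from the tail (see tailId/itemOffset).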
    76  type freezerTable struct {
    77  	// WARNING: The `items` field is accessed atomically. On 32 bit platforms, only
    78  	// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
    79  	// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
    80  	items uint64 // Number of items stored in the table (including items removed from tail)
    81  
    82  	noCompression bool   // if true, disables snappy compression. Note: does not work retroactively
    83  	maxFileSize   uint32 // Max file size for data-files
    84  	name          string
    85  	path          string
    86  
    87  	head   *os.File            // File descriptor for the data head of the table
    88  	files  map[uint32]*os.File // open files
    89  	headId uint32              // number of the currently active head file
    90  	tailId uint32              // number of the earliest file
    91  	index  *os.File            // File descriptor for the indexEntry file of the table
    92  
    93  	// In the case that old items are deleted (from the tail), we use itemOffset
    94  	// to count how many historic items have gone missing.
    95  	itemOffset uint32 // Offset (number of discarded items)
    96  
    97  	headBytes  uint32        // Number of bytes written to the head file
    98  	readMeter  metrics.Meter // Meter for measuring the effective amount of data read
    99  	writeMeter metrics.Meter // Meter for measuring the effective amount of data written
   100  	sizeGauge  metrics.Gauge // Gauge for tracking the combined size of all freezer tables
   101  
   102  	logger log.Logger   // Logger with database path and table name embedded
   103  	lock   sync.RWMutex // Mutex protecting the data file descriptors
   104  }
   105  
   106  // NewFreezerTable opens the given path as a freezer table.
   107  func NewFreezerTable(path, name string, disableSnappy bool) (*freezerTable, error) {
   108  	return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, disableSnappy)
   109  }
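
        // A minimal usage sketch (error handling elided; the path and table name are
        // placeholders):
        //
        //	t, _ := NewFreezerTable("/tmp/freezer", "headers", false)
        //	_ = t.Append(0, []byte("first item"))
        //	blob, _ := t.Retrieve(0)
        //	_ = blob
        //	t.Close()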
   110  
   111  // newTable opens a freezer table with default settings - 2G files
   112  func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, disableSnappy bool) (*freezerTable, error) {
   113  	return newCustomTable(path, name, readMeter, writeMeter, sizeGauge, 2*1000*1000*1000, disableSnappy)
   114  }
   115  
   116  // openFreezerFileForAppend opens a freezer table file and seeks to the end
   117  func openFreezerFileForAppend(filename string) (*os.File, error) {
   118  	// Open the file without the O_APPEND flag
   119  	// because it has differing behaviour during Truncate operations
   120  	// on different operating systems.
   121  	file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
   122  	if err != nil {
   123  		return nil, err
   124  	}
   125  	// Seek to end for append
   126  	if _, err = file.Seek(0, io.SeekEnd); err != nil {
   127  		return nil, err
   128  	}
   129  	return file, nil
   130  }
   131  
   132  // openFreezerFileForReadOnly opens a freezer table file for read only access
   133  func openFreezerFileForReadOnly(filename string) (*os.File, error) {
   134  	return os.OpenFile(filename, os.O_RDONLY, 0644)
   135  }
   136  
   137  // openFreezerFileTruncated opens a freezer table making sure it is truncated
   138  func openFreezerFileTruncated(filename string) (*os.File, error) {
   139  	return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
   140  }
   141  
   142  // truncateFreezerFile resizes a freezer table file and seeks to the end
   143  func truncateFreezerFile(file *os.File, size int64) error {
   144  	if err := file.Truncate(size); err != nil {
   145  		return err
   146  	}
   147  	// Seek to end for append
   148  	if _, err := file.Seek(0, io.SeekEnd); err != nil {
   149  		return err
   150  	}
   151  	return nil
   152  }
   153  
   154  // newCustomTable opens a freezer table, creating the data and index files if they are
   155  // non-existent. Both files are truncated to the shortest common length to ensure
   156  // they don't go out of sync.
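        // The index file is named '<name>.ridx' (raw) or '<name>.cidx' (compressed),
        // and the data files '<name>.NNNN.rdat' or '<name>.NNNN.cdat' respectively,
        // where NNNN is the zero-padded file number (see openFile).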
   157  func newCustomTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
   158  	// Ensure the containing directory exists and open the indexEntry file
   159  	if err := os.MkdirAll(path, 0755); err != nil {
   160  		return nil, err
   161  	}
   162  	var idxName string
   163  	if noCompression {
   164  		// Raw idx
   165  		idxName = fmt.Sprintf("%s.ridx", name)
   166  	} else {
   167  		// Compressed idx
   168  		idxName = fmt.Sprintf("%s.cidx", name)
   169  	}
   170  	offsets, err := openFreezerFileForAppend(filepath.Join(path, idxName))
   171  	if err != nil {
   172  		return nil, err
   173  	}
   174  	// Create the table and repair any past inconsistency
   175  	tab := &freezerTable{
   176  		index:         offsets,
   177  		files:         make(map[uint32]*os.File),
   178  		readMeter:     readMeter,
   179  		writeMeter:    writeMeter,
   180  		sizeGauge:     sizeGauge,
   181  		name:          name,
   182  		path:          path,
   183  		logger:        log.New("database", path, "table", name),
   184  		noCompression: noCompression,
   185  		maxFileSize:   maxFilesize,
   186  	}
   187  	if err := tab.repair(); err != nil {
   188  		tab.Close()
   189  		return nil, err
   190  	}
   191  	// Initialize the starting size counter
   192  	size, err := tab.sizeNolock()
   193  	if err != nil {
   194  		tab.Close()
   195  		return nil, err
   196  	}
   197  	tab.sizeGauge.Inc(int64(size))
   198  
   199  	return tab, nil
   200  }
   201  
   202  // repair cross-checks the head and the index file and truncates them to
   203  // be in sync with each other after a potential crash / data loss.
   204  func (t *freezerTable) repair() error {
   205  	// Create a temporary offset buffer to init files with and read indexEntry into
   206  	buffer := make([]byte, indexEntrySize)
   207  
   208  	// If we've just created the files, initialize the index with the 0 indexEntry
   209  	stat, err := t.index.Stat()
   210  	if err != nil {
   211  		return err
   212  	}
   213  	if stat.Size() == 0 {
   214  		if _, err := t.index.Write(buffer); err != nil {
   215  			return err
   216  		}
   217  	}
   218  	// Ensure the index is a multiple of indexEntrySize bytes
   219  	if overflow := stat.Size() % indexEntrySize; overflow != 0 {
   220  		truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path
   221  	}
   222  	// Retrieve the file sizes and prepare for truncation
   223  	if stat, err = t.index.Stat(); err != nil {
   224  		return err
   225  	}
   226  	offsetsSize := stat.Size()
   227  
   228  	// Open the head file
   229  	var (
   230  		firstIndex  indexEntry
   231  		lastIndex   indexEntry
   232  		contentSize int64
   233  		contentExp  int64
   234  	)
   235  	// Read index zero, determine what file is the earliest
   236  	// and what item offset to use
   237  	t.index.ReadAt(buffer, 0)
   238  	firstIndex.unmarshalBinary(buffer)
   239  
   240  	t.tailId = firstIndex.filenum
   241  	t.itemOffset = firstIndex.offset
   242  
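        	// Read the last index entry to figure out which data file is the head and
        	// how many content bytes it is expected to hold.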
   243  	t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
   244  	lastIndex.unmarshalBinary(buffer)
   245  	t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
   246  	if err != nil {
   247  		return err
   248  	}
   249  	if stat, err = t.head.Stat(); err != nil {
   250  		return err
   251  	}
   252  	contentSize = stat.Size()
   253  
   254  	// Keep truncating both files until they come in sync
   255  	contentExp = int64(lastIndex.offset)
   256  
   257  	for contentExp != contentSize {
   258  		// Truncate the head file to the last offset pointer
   259  		if contentExp < contentSize {
   260  			t.logger.Warn("Truncating dangling head", "indexed", common.StorageSize(contentExp), "stored", common.StorageSize(contentSize))
   261  			if err := truncateFreezerFile(t.head, contentExp); err != nil {
   262  				return err
   263  			}
   264  			contentSize = contentExp
   265  		}
   266  		// Truncate the index to point within the head file
   267  		if contentExp > contentSize {
   268  			t.logger.Warn("Truncating dangling indexes", "indexed", common.StorageSize(contentExp), "stored", common.StorageSize(contentSize))
   269  			if err := truncateFreezerFile(t.index, offsetsSize-indexEntrySize); err != nil {
   270  				return err
   271  			}
   272  			offsetsSize -= indexEntrySize
   273  			t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
   274  			var newLastIndex indexEntry
   275  			newLastIndex.unmarshalBinary(buffer)
   276  			// We might have slipped back into an earlier head-file here
   277  			if newLastIndex.filenum != lastIndex.filenum {
   278  				// Release earlier opened file
   279  				t.releaseFile(lastIndex.filenum)
   280  				if t.head, err = t.openFile(newLastIndex.filenum, openFreezerFileForAppend); err != nil {
   281  					return err
   282  				}
   283  				if stat, err = t.head.Stat(); err != nil {
   284  					// TODO, anything more we can do here?
   285  					// A data file has gone missing...
   286  					return err
   287  				}
   288  				contentSize = stat.Size()
   289  			}
   290  			lastIndex = newLastIndex
   291  			contentExp = int64(lastIndex.offset)
   292  		}
   293  	}
   294  	// Ensure all repair changes have been written to disk
   295  	if err := t.index.Sync(); err != nil {
   296  		return err
   297  	}
   298  	if err := t.head.Sync(); err != nil {
   299  		return err
   300  	}
   301  	// Update the item and byte counters and return
   302  	t.items = uint64(t.itemOffset) + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
   303  	t.headBytes = uint32(contentSize)
   304  	t.headId = lastIndex.filenum
   305  
   306  	// Close opened files and preopen all files
   307  	if err := t.preopen(); err != nil {
   308  		return err
   309  	}
   310  	t.logger.Debug("Chain freezer table opened", "items", t.items, "size", common.StorageSize(t.headBytes))
   311  	return nil
   312  }
   313  
   314  // preopen opens all files that the freezer will need. This method should be called from an init-context,
   315  // since it assumes that it doesn't have to bother with locking.
   316  // The rationale for doing preopen is to not have to do it from within Retrieve, thus not needing to ever
   317  // obtain a write-lock within Retrieve.
   318  func (t *freezerTable) preopen() (err error) {
   319  	// The repair might have already opened (some) files
   320  	t.releaseFilesAfter(0, false)
   321  	// Open all except head in RDONLY
   322  	for i := t.tailId; i < t.headId; i++ {
   323  		if _, err = t.openFile(i, openFreezerFileForReadOnly); err != nil {
   324  			return err
   325  		}
   326  	}
   327  	// Open head in read/write
   328  	t.head, err = t.openFile(t.headId, openFreezerFileForAppend)
   329  	return err
   330  }
   331  
   332  // truncate discards any recent data above the provided threshold number.
   333  func (t *freezerTable) truncate(items uint64) error {
   334  	t.lock.Lock()
   335  	defer t.lock.Unlock()
   336  
   337  	// If our item count is correct, don't do anything
   338  	existing := atomic.LoadUint64(&t.items)
   339  	if existing <= items {
   340  		return nil
   341  	}
   342  	// We need to truncate, save the old size for metrics tracking
   343  	oldSize, err := t.sizeNolock()
   344  	if err != nil {
   345  		return err
   346  	}
   347  	// Something's out of sync, truncate the table's offset index
   348  	log := t.logger.Debug
   349  	if existing > items+1 {
   350  		log = t.logger.Warn // Only warn loudly if we delete multiple items
   351  	}
   352  	log("Truncating freezer table", "items", existing, "limit", items)
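        	// Keep the first items+1 index entries: entry i+1 marks the end of item i.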
   353  	if err := truncateFreezerFile(t.index, int64(items+1)*indexEntrySize); err != nil {
   354  		return err
   355  	}
   356  	// Calculate the new expected size of the data file and truncate it
   357  	buffer := make([]byte, indexEntrySize)
   358  	if _, err := t.index.ReadAt(buffer, int64(items*indexEntrySize)); err != nil {
   359  		return err
   360  	}
   361  	var expected indexEntry
   362  	expected.unmarshalBinary(buffer)
   363  
   364  	// We might need to truncate back to older files
   365  	if expected.filenum != t.headId {
   366  		// If already open for reading, force-reopen for writing
   367  		t.releaseFile(expected.filenum)
   368  		newHead, err := t.openFile(expected.filenum, openFreezerFileForAppend)
   369  		if err != nil {
   370  			return err
   371  		}
   372  		// Release any files _after the current head -- both the previous head
   373  		// and any files which may have been opened for reading
   374  		t.releaseFilesAfter(expected.filenum, true)
   375  		// Set back the historic head
   376  		t.head = newHead
   377  		atomic.StoreUint32(&t.headId, expected.filenum)
   378  	}
   379  	if err := truncateFreezerFile(t.head, int64(expected.offset)); err != nil {
   380  		return err
   381  	}
   382  	// All data files truncated, set internal counters and return
   383  	atomic.StoreUint64(&t.items, items)
   384  	atomic.StoreUint32(&t.headBytes, expected.offset)
   385  
   386  	// Retrieve the new size and update the total size counter
   387  	newSize, err := t.sizeNolock()
   388  	if err != nil {
   389  		return err
   390  	}
   391  	t.sizeGauge.Dec(int64(oldSize - newSize))
   392  
   393  	return nil
   394  }
   395  
   396  // Close closes all opened files.
   397  func (t *freezerTable) Close() error {
   398  	t.lock.Lock()
   399  	defer t.lock.Unlock()
   400  
   401  	var errs []error
   402  	if err := t.index.Close(); err != nil {
   403  		errs = append(errs, err)
   404  	}
   405  	t.index = nil
   406  
   407  	for _, f := range t.files {
   408  		if err := f.Close(); err != nil {
   409  			errs = append(errs, err)
   410  		}
   411  	}
   412  	t.head = nil
   413  
   414  	if errs != nil {
   415  		return fmt.Errorf("%v", errs)
   416  	}
   417  	return nil
   418  }
   419  
   420  // openFile assumes that the write-lock is held by the caller
   421  func (t *freezerTable) openFile(num uint32, opener func(string) (*os.File, error)) (f *os.File, err error) {
   422  	var exist bool
   423  	if f, exist = t.files[num]; !exist {
   424  		var name string
   425  		if t.noCompression {
   426  			name = fmt.Sprintf("%s.%04d.rdat", t.name, num)
   427  		} else {
   428  			name = fmt.Sprintf("%s.%04d.cdat", t.name, num)
   429  		}
   430  		f, err = opener(filepath.Join(t.path, name))
   431  		if err != nil {
   432  			return nil, err
   433  		}
   434  		t.files[num] = f
   435  	}
   436  	return f, err
   437  }
   438  
   439  // releaseFile closes a file, and removes it from the open file cache.
   440  // Assumes that the caller holds the write lock
   441  func (t *freezerTable) releaseFile(num uint32) {
   442  	if f, exist := t.files[num]; exist {
   443  		delete(t.files, num)
   444  		f.Close()
   445  	}
   446  }
   447  
   448  // releaseFilesAfter closes all open files with a higher number, and optionally also deletes the files
   449  func (t *freezerTable) releaseFilesAfter(num uint32, remove bool) {
   450  	for fnum, f := range t.files {
   451  		if fnum > num {
   452  			delete(t.files, fnum)
   453  			f.Close()
   454  			if remove {
   455  				os.Remove(f.Name())
   456  			}
   457  		}
   458  	}
   459  }
   460  
   461  // Append injects a binary blob at the end of the freezer table. The item number
   462  // is a precautionary parameter to ensure data correctness, but the table will
   463  // reject already existing data.
   464  //
   465  // Note, this method will *not* flush any data to disk so be sure to explicitly
   466  // fsync before irreversibly deleting data from the database.
   467  func (t *freezerTable) Append(item uint64, blob []byte) error {
   468  	// Encode the blob before the lock portion
   469  	if !t.noCompression {
   470  		blob = snappy.Encode(nil, blob)
   471  	}
   472  	// Read lock prevents competition with truncate
   473  	retry, err := t.append(item, blob, false)
   474  	if err != nil {
   475  		return err
   476  	}
   477  	if retry {
   478  		// Read lock was insufficient, retry with a writelock
   479  		_, err = t.append(item, blob, true)
   480  	}
   481  	return err
   482  }
   483  
   484  // append injects a binary blob at the end of the freezer table.
   485  // Normally, inserts do not require holding the write-lock, so it should be invoked with 'wlock' set to
   486  // false.
   487  // However, if the data would grow the current file out of bounds, then this
   488  // method will return 'true, nil', indicating that the caller should retry, this time
   489  // with 'wlock' set to true.
   490  func (t *freezerTable) append(item uint64, encodedBlob []byte, wlock bool) (bool, error) {
   491  	if wlock {
   492  		t.lock.Lock()
   493  		defer t.lock.Unlock()
   494  	} else {
   495  		t.lock.RLock()
   496  		defer t.lock.RUnlock()
   497  	}
   498  	// Ensure the table is still accessible
   499  	if t.index == nil || t.head == nil {
   500  		return false, errClosed
   501  	}
   502  	// Ensure only the next item can be written, nothing else
   503  	if atomic.LoadUint64(&t.items) != item {
   504  		return false, fmt.Errorf("appending unexpected item: want %d, have %d", t.items, item)
   505  	}
   506  	bLen := uint32(len(encodedBlob))
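        	// The first condition guards against uint32 wrap-around, the second checks
        	// whether the head file would exceed the maximum data-file size.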
   507  	if t.headBytes+bLen < bLen ||
   508  		t.headBytes+bLen > t.maxFileSize {
   509  		// Writing would overflow, so we need to open a new data file.
   510  		// If we don't already hold the writelock, abort and let the caller
   511  		// invoke this method a second time.
   512  		if !wlock {
   513  			return true, nil
   514  		}
   515  		nextID := atomic.LoadUint32(&t.headId) + 1
   516  		// We open the next file in truncated mode -- if this file already
   517  		// exists, we need to start over from scratch on it
   518  		newHead, err := t.openFile(nextID, openFreezerFileTruncated)
   519  		if err != nil {
   520  			return false, err
   521  		}
   522  		// Close old file, and reopen in RDONLY mode
   523  		t.releaseFile(t.headId)
   524  		t.openFile(t.headId, openFreezerFileForReadOnly)
   525  
   526  		// Swap out the current head
   527  		t.head = newHead
   528  		atomic.StoreUint32(&t.headBytes, 0)
   529  		atomic.StoreUint32(&t.headId, nextID)
   530  	}
   531  	if _, err := t.head.Write(encodedBlob); err != nil {
   532  		return false, err
   533  	}
   534  	newOffset := atomic.AddUint32(&t.headBytes, bLen)
   535  	idx := indexEntry{
   536  		filenum: atomic.LoadUint32(&t.headId),
   537  		offset:  newOffset,
   538  	}
   539  	// Write indexEntry
   540  	t.index.Write(idx.marshallBinary())
   541  
   542  	t.writeMeter.Mark(int64(bLen + indexEntrySize))
   543  	t.sizeGauge.Inc(int64(bLen + indexEntrySize))
   544  
   545  	atomic.AddUint64(&t.items, 1)
   546  	return false, nil
   547  }
   548  
   549  // getBounds looks up the index entries bracketing the given item and returns
   550  // the start offset, end offset and file number where its data resides.
   551  func (t *freezerTable) getBounds(item uint64) (uint32, uint32, uint32, error) {
   552  	buffer := make([]byte, indexEntrySize)
   553  	var startIdx, endIdx indexEntry
   554  	// Read second index
   555  	if _, err := t.index.ReadAt(buffer, int64((item+1)*indexEntrySize)); err != nil {
   556  		return 0, 0, 0, err
   557  	}
   558  	endIdx.unmarshalBinary(buffer)
   559  	// Read first index (unless it's the very first item)
   560  	if item != 0 {
   561  		if _, err := t.index.ReadAt(buffer, int64(item*indexEntrySize)); err != nil {
   562  			return 0, 0, 0, err
   563  		}
   564  		startIdx.unmarshalBinary(buffer)
   565  	} else {
   566  		// Special case if we're reading the first item in the freezer. We assume that
   567  		// the first item always starts at zero (regarding deletion, we only
   568  		// support deletion by files, so that assumption holds).
   569  		// This means we can use the first item's metadata to carry information about
   570  		// the 'global' offset, for the deletion case.
   571  		return 0, endIdx.offset, endIdx.filenum, nil
   572  	}
   573  	if startIdx.filenum != endIdx.filenum {
   574  		// If a piece of data 'crosses' a data-file,
   575  		// it's actually in one piece on the second data-file.
   576  		// We return a zero-indexEntry for the second file as start
   577  		return 0, endIdx.offset, endIdx.filenum, nil
   578  	}
   579  	return startIdx.offset, endIdx.offset, endIdx.filenum, nil
   580  }
   581  
   582  // Retrieve looks up the data offset of an item with the given number and retrieves
   583  // the raw binary blob from the data file.
   584  func (t *freezerTable) Retrieve(item uint64) ([]byte, error) {
   585  	blob, err := t.retrieve(item)
   586  	if err != nil {
   587  		return nil, err
   588  	}
   589  	if t.noCompression {
   590  		return blob, nil
   591  	}
   592  	return snappy.Decode(nil, blob)
   593  }
   594  
   595  // retrieve looks up the data offset of an item with the given number and retrieves
   596  // the raw binary blob from the data file. Note that this method does not decode
   597  // compressed data.
   598  func (t *freezerTable) retrieve(item uint64) ([]byte, error) {
   599  	t.lock.RLock()
   600  	defer t.lock.RUnlock()
   601  	// Ensure the table and the item are accessible
   602  	if t.index == nil || t.head == nil {
   603  		return nil, errClosed
   604  	}
   605  	if atomic.LoadUint64(&t.items) <= item {
   606  		return nil, errOutOfBounds
   607  	}
   608  	// Ensure the item was not deleted from the tail either
   609  	if uint64(t.itemOffset) > item {
   610  		return nil, errOutOfBounds
   611  	}
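        	// Map the global item number to its position within the index file, which
        	// excludes the items already deleted from the tail.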
   612  	startOffset, endOffset, filenum, err := t.getBounds(item - uint64(t.itemOffset))
   613  	if err != nil {
   614  		return nil, err
   615  	}
   616  	dataFile, exist := t.files[filenum]
   617  	if !exist {
   618  		return nil, fmt.Errorf("missing data file %d", filenum)
   619  	}
   620  	// Retrieve the data itself and return it raw (decompression happens in Retrieve)
   621  	blob := make([]byte, endOffset-startOffset)
   622  	if _, err := dataFile.ReadAt(blob, int64(startOffset)); err != nil {
   623  		return nil, err
   624  	}
   625  	t.readMeter.Mark(int64(len(blob) + 2*indexEntrySize))
   626  	return blob, nil
   627  }
   628  
   629  // has returns an indicator of whether the item with the given number
   630  // exists in the freezer table.
   631  func (t *freezerTable) has(number uint64) bool {
   632  	return atomic.LoadUint64(&t.items) > number
   633  }
   634  
   635  // size returns the total data size in the freezer table.
   636  func (t *freezerTable) size() (uint64, error) {
   637  	t.lock.RLock()
   638  	defer t.lock.RUnlock()
   639  
   640  	return t.sizeNolock()
   641  }
   642  
   643  // sizeNolock returns the total data size in the freezer table without obtaining
   644  // the mutex first.
   645  func (t *freezerTable) sizeNolock() (uint64, error) {
   646  	stat, err := t.index.Stat()
   647  	if err != nil {
   648  		return 0, err
   649  	}
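        	// Rough size: assume every non-head data file is full (maxFileSize), then add
        	// the bytes written to the head file and the size of the index file.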
   650  	total := uint64(t.maxFileSize)*uint64(t.headId-t.tailId) + uint64(t.headBytes) + uint64(stat.Size())
   651  	return total, nil
   652  }
   653  
   654  // Sync pushes any pending data from memory out to disk. This is an expensive
   655  // operation, so use it with care.
   656  func (t *freezerTable) Sync() error {
   657  	if err := t.index.Sync(); err != nil {
   658  		return err
   659  	}
   660  	return t.head.Sync()
   661  }
   662  
   663  // DumpIndex is a debug print utility function, mainly for testing. It can also
   664  // be used to analyse a live freezer table index.
   665  func (t *freezerTable) DumpIndex(start, stop int64) {
   666  	buf := make([]byte, indexEntrySize)
   667  
   668  	fmt.Printf("| number | fileno | offset |\n")
   669  	fmt.Printf("|--------|--------|--------|\n")
   670  
   671  	for i := uint64(start); ; i++ {
   672  		if _, err := t.index.ReadAt(buf, int64(i*indexEntrySize)); err != nil {
   673  			break
   674  		}
   675  		var entry indexEntry
   676  		entry.unmarshalBinary(buf)
   677  		fmt.Printf("|  %03d   |  %03d   |  %03d   | \n", i, entry.filenum, entry.offset)
   678  		if stop > 0 && i >= uint64(stop) {
   679  			break
   680  		}
   681  	}
   682  	fmt.Printf("|--------------------------|\n")
   683  }