github.com/aigarnetwork/aigar@v0.0.0-20191115204914-d59a6eb70f8e/core/rawdb/freezer_table.go

//  Copyright 2018 The go-ethereum Authors
//  Copyright 2019 The go-aigar Authors
//  This file is part of the go-aigar library.
//
//  The go-aigar library is free software: you can redistribute it and/or modify
//  it under the terms of the GNU Lesser General Public License as published by
//  the Free Software Foundation, either version 3 of the License, or
//  (at your option) any later version.
//
//  The go-aigar library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
//  GNU Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public License
//  along with the go-aigar library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sync"
	"sync/atomic"

	"github.com/AigarNetwork/aigar/common"
	"github.com/AigarNetwork/aigar/log"
	"github.com/AigarNetwork/aigar/metrics"
	"github.com/golang/snappy"
)

var (
	// errClosed is returned if an operation attempts to read from or write to the
	// freezer table after it has already been closed.
	errClosed = errors.New("closed")

	// errOutOfBounds is returned if the item requested is not contained within the
	// freezer table.
	errOutOfBounds = errors.New("out of bounds")

	// errNotSupported is returned if the database doesn't support the required operation.
	errNotSupported = errors.New("this operation is not supported")
)

// indexEntry contains the number/id of the file that the data resides in, as well
// as the offset within the file to the end of the data.
// In serialized form, the filenum is stored as a uint16.
type indexEntry struct {
	filenum uint32 // stored as uint16 ( 2 bytes)
	offset  uint32 // stored as uint32 ( 4 bytes)
}

const indexEntrySize = 6

// unmarshalBinary deserializes binary b into the index entry.
func (i *indexEntry) unmarshalBinary(b []byte) error {
	i.filenum = uint32(binary.BigEndian.Uint16(b[:2]))
	i.offset = binary.BigEndian.Uint32(b[2:6])
	return nil
}

// marshallBinary serializes the index entry into binary.
func (i *indexEntry) marshallBinary() []byte {
	b := make([]byte, indexEntrySize)
	binary.BigEndian.PutUint16(b[:2], uint16(i.filenum))
	binary.BigEndian.PutUint32(b[2:6], i.offset)
	return b
}

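// The sketch below is an illustration added for this write-up, not part of the
// original file: it shows the 6-byte on-disk layout produced by marshallBinary
// (a big-endian uint16 file number followed by a big-endian uint32 end offset)
// and that unmarshalBinary round-trips it. The values are hypothetical.
func exampleIndexEntryLayout() {
	entry := indexEntry{filenum: 3, offset: 123456}
	b := entry.marshallBinary() // -> 00 03 00 01 e2 40

	var decoded indexEntry
	decoded.unmarshalBinary(b)
	fmt.Printf("raw=%x filenum=%d offset=%d\n", b, decoded.filenum, decoded.offset)
}
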
// freezerTable represents a single chained data table within the freezer (e.g. blocks).
// It consists of a data file (snappy encoded arbitrary data blobs) and an indexEntry
// file (uncompressed 6-byte index entries pointing into the data file).
type freezerTable struct {
	// WARNING: The `items` field is accessed atomically. On 32 bit platforms, only
	// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
	// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
	items uint64 // Number of items stored in the table (including items removed from tail)

	noCompression bool   // if true, disables snappy compression. Note: does not work retroactively
	maxFileSize   uint32 // Max file size for data-files
	name          string
	path          string

	head   *os.File            // File descriptor for the data head of the table
	files  map[uint32]*os.File // open files
	headId uint32              // number of the currently active head file
	tailId uint32              // number of the earliest file
	index  *os.File            // File descriptor for the indexEntry file of the table

	// In the case that old items are deleted (from the tail), we use itemOffset
	// to count how many historic items have gone missing.
	itemOffset uint32 // Offset (number of discarded items)

	headBytes  uint32        // Number of bytes written to the head file
	readMeter  metrics.Meter // Meter for measuring the effective amount of data read
	writeMeter metrics.Meter // Meter for measuring the effective amount of data written
	sizeGauge  metrics.Gauge // Gauge for tracking the combined size of all freezer tables

	logger log.Logger   // Logger with database path and table name embedded
	lock   sync.RWMutex // Mutex protecting the data file descriptors
}

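// For orientation (an illustrative sketch added for this write-up, not from the
// original file): a table named "headers" stored without compression would appear
// on disk roughly as
//
//	headers.ridx       - the index file, one 6-byte indexEntry per stored item
//	                     (plus entry zero, which describes the deleted tail)
//	headers.0000.rdat  - the earliest (tail) data file
//	headers.0001.rdat  - a filled intermediate data file
//	headers.0002.rdat  - the current head file, appended to until maxFileSize
//
// With compression enabled, the extensions become .cidx and .cdat instead.
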
// newTable opens a freezer table with default settings - 2G files
func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, disableSnappy bool) (*freezerTable, error) {
	return newCustomTable(path, name, readMeter, writeMeter, sizeGauge, 2*1000*1000*1000, disableSnappy)
}

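// A minimal usage sketch (added for illustration, not part of the original file),
// assuming the metrics package exposes NewMeter/NewGauge constructors and using a
// hypothetical path: items must be appended strictly in sequence starting at the
// current item count, and reads go through Retrieve.
func exampleFreezerTableUsage() error {
	table, err := newTable("/tmp/freezer", "headers", metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), true)
	if err != nil {
		return err
	}
	defer table.Close()

	// A fresh table holds 0 items, so the first append must use item number 0.
	if err := table.Append(0, []byte("first item")); err != nil {
		return err
	}
	blob, err := table.Retrieve(0)
	if err != nil {
		return err
	}
	fmt.Printf("stored %d bytes\n", len(blob))
	return table.Sync()
}
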
// openFreezerFileForAppend opens a freezer table file and seeks to the end
func openFreezerFileForAppend(filename string) (*os.File, error) {
	// Open the file without the O_APPEND flag
	// because it has differing behaviour during Truncate operations
	// on different OSes
	file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return nil, err
	}
	// Seek to end for append
	if _, err = file.Seek(0, io.SeekEnd); err != nil {
		return nil, err
	}
	return file, nil
}

// openFreezerFileForReadOnly opens a freezer table file for read only access
func openFreezerFileForReadOnly(filename string) (*os.File, error) {
	return os.OpenFile(filename, os.O_RDONLY, 0644)
}

// openFreezerFileTruncated opens a freezer table file, making sure it is truncated
func openFreezerFileTruncated(filename string) (*os.File, error) {
	return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
}

// truncateFreezerFile resizes a freezer table file and seeks to the end
func truncateFreezerFile(file *os.File, size int64) error {
	if err := file.Truncate(size); err != nil {
		return err
	}
	// Seek to end for append
	if _, err := file.Seek(0, io.SeekEnd); err != nil {
		return err
	}
	return nil
}

// newCustomTable opens a freezer table, creating the data and index files if they are
// non-existent. Both files are truncated to the shortest common length to ensure
// they don't go out of sync.
func newCustomTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
	// Ensure the containing directory exists and open the indexEntry file
	if err := os.MkdirAll(path, 0755); err != nil {
		return nil, err
	}
	var idxName string
	if noCompression {
		// Raw idx
		idxName = fmt.Sprintf("%s.ridx", name)
	} else {
		// Compressed idx
		idxName = fmt.Sprintf("%s.cidx", name)
	}
	offsets, err := openFreezerFileForAppend(filepath.Join(path, idxName))
	if err != nil {
		return nil, err
	}
	// Create the table and repair any past inconsistency
	tab := &freezerTable{
		index:         offsets,
		files:         make(map[uint32]*os.File),
		readMeter:     readMeter,
		writeMeter:    writeMeter,
		sizeGauge:     sizeGauge,
		name:          name,
		path:          path,
		logger:        log.New("database", path, "table", name),
		noCompression: noCompression,
		maxFileSize:   maxFilesize,
	}
	if err := tab.repair(); err != nil {
		tab.Close()
		return nil, err
	}
	// Initialize the starting size counter
	size, err := tab.sizeNolock()
	if err != nil {
		tab.Close()
		return nil, err
	}
	tab.sizeGauge.Inc(int64(size))

	return tab, nil
}

// repair cross-checks the head and the index file and truncates them to
// be in sync with each other after a potential crash / data loss.
func (t *freezerTable) repair() error {
	// Create a temporary offset buffer to init files with and read indexEntry into
	buffer := make([]byte, indexEntrySize)

	// If we've just created the files, initialize the index with the 0 indexEntry
	stat, err := t.index.Stat()
	if err != nil {
		return err
	}
	if stat.Size() == 0 {
		if _, err := t.index.Write(buffer); err != nil {
			return err
		}
	}
	// Ensure the index is a multiple of indexEntrySize bytes
	if overflow := stat.Size() % indexEntrySize; overflow != 0 {
		truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path
	}
	// Retrieve the file sizes and prepare for truncation
	if stat, err = t.index.Stat(); err != nil {
		return err
	}
	offsetsSize := stat.Size()

	// Open the head file
	var (
		firstIndex  indexEntry
		lastIndex   indexEntry
		contentSize int64
		contentExp  int64
	)
	// Read index zero, determine what file is the earliest
	// and what item offset to use
	t.index.ReadAt(buffer, 0)
	firstIndex.unmarshalBinary(buffer)

	// The filenum half of entry zero identifies the earliest data file, while its
	// offset half records how many items have been deleted from the tail.
	t.tailId = firstIndex.filenum
	t.itemOffset = firstIndex.offset

	t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
	lastIndex.unmarshalBinary(buffer)
	t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
	if err != nil {
		return err
	}
	if stat, err = t.head.Stat(); err != nil {
		return err
	}
	contentSize = stat.Size()

	// Keep truncating both files until they come in sync
	contentExp = int64(lastIndex.offset)

	for contentExp != contentSize {
		// Truncate the head file to the last offset pointer
		if contentExp < contentSize {
			t.logger.Warn("Truncating dangling head", "indexed", common.StorageSize(contentExp), "stored", common.StorageSize(contentSize))
			if err := truncateFreezerFile(t.head, contentExp); err != nil {
				return err
			}
			contentSize = contentExp
		}
		// Truncate the index to point within the head file
		if contentExp > contentSize {
			t.logger.Warn("Truncating dangling indexes", "indexed", common.StorageSize(contentExp), "stored", common.StorageSize(contentSize))
			if err := truncateFreezerFile(t.index, offsetsSize-indexEntrySize); err != nil {
				return err
			}
			offsetsSize -= indexEntrySize
			t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
			var newLastIndex indexEntry
			newLastIndex.unmarshalBinary(buffer)
			// We might have slipped back into an earlier head-file here
			if newLastIndex.filenum != lastIndex.filenum {
				// Release earlier opened file
				t.releaseFile(lastIndex.filenum)
				if t.head, err = t.openFile(newLastIndex.filenum, openFreezerFileForAppend); err != nil {
					return err
				}
				if stat, err = t.head.Stat(); err != nil {
					// TODO, anything more we can do here?
					// A data file has gone missing...
					return err
				}
				contentSize = stat.Size()
			}
			lastIndex = newLastIndex
			contentExp = int64(lastIndex.offset)
		}
	}
	// Ensure all repair changes have been written to disk
	if err := t.index.Sync(); err != nil {
		return err
	}
	if err := t.head.Sync(); err != nil {
		return err
	}
	// Update the item and byte counters and return
	t.items = uint64(t.itemOffset) + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
	t.headBytes = uint32(contentSize)
	t.headId = lastIndex.filenum

	// Close opened files and preopen all files
	if err := t.preopen(); err != nil {
		return err
	}
	t.logger.Debug("Chain freezer table opened", "items", t.items, "size", common.StorageSize(t.headBytes))
	return nil
}

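// Illustrative arithmetic for the item counter above (added for this write-up,
// hypothetical numbers): because entry zero only describes the tail, an index
// file of 43 entries (258 bytes) corresponds to 42 stored items.
func exampleItemCountFromIndexSize() {
	offsetsSize := int64(43 * indexEntrySize) // 258 bytes, hypothetical
	items := uint64(offsetsSize/indexEntrySize - 1)
	fmt.Println(items) // 42
}
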
// preopen opens all files that the freezer will need. This method should be called
// from an init context, since it assumes that it doesn't have to bother with locking.
// The rationale for preopening is to avoid having to open files from within Retrieve,
// thus never needing to obtain a write-lock within Retrieve.
func (t *freezerTable) preopen() (err error) {
	// The repair might have already opened (some) files
	t.releaseFilesAfter(0, false)
	// Open all except head in RDONLY
	for i := t.tailId; i < t.headId; i++ {
		if _, err = t.openFile(i, openFreezerFileForReadOnly); err != nil {
			return err
		}
	}
	// Open head in read/write
	t.head, err = t.openFile(t.headId, openFreezerFileForAppend)
	return err
}

// truncate discards any recent data above the provided threshold number.
func (t *freezerTable) truncate(items uint64) error {
	t.lock.Lock()
	defer t.lock.Unlock()

	// If our item count is correct, don't do anything
	if atomic.LoadUint64(&t.items) <= items {
		return nil
	}
	// We need to truncate, save the old size for metrics tracking
	oldSize, err := t.sizeNolock()
	if err != nil {
		return err
	}
	// Something's out of sync, truncate the table's offset index
	t.logger.Warn("Truncating freezer table", "items", t.items, "limit", items)
	if err := truncateFreezerFile(t.index, int64(items+1)*indexEntrySize); err != nil {
		return err
	}
	// Calculate the new expected size of the data file and truncate it
	buffer := make([]byte, indexEntrySize)
	if _, err := t.index.ReadAt(buffer, int64(items*indexEntrySize)); err != nil {
		return err
	}
	var expected indexEntry
	expected.unmarshalBinary(buffer)

	// We might need to truncate back to older files
	if expected.filenum != t.headId {
		// If already open for reading, force-reopen for writing
		t.releaseFile(expected.filenum)
		newHead, err := t.openFile(expected.filenum, openFreezerFileForAppend)
		if err != nil {
			return err
		}
		// Release any files after the current head -- both the previous head
		// and any files which may have been opened for reading
		t.releaseFilesAfter(expected.filenum, true)
		// Set back the historic head
		t.head = newHead
		atomic.StoreUint32(&t.headId, expected.filenum)
	}
	if err := truncateFreezerFile(t.head, int64(expected.offset)); err != nil {
		return err
	}
	// All data files truncated, set internal counters and return
	atomic.StoreUint64(&t.items, items)
	atomic.StoreUint32(&t.headBytes, expected.offset)

	// Retrieve the new size and update the total size counter
	newSize, err := t.sizeNolock()
	if err != nil {
		return err
	}
	t.sizeGauge.Dec(int64(oldSize - newSize))

	return nil
}

// Close closes all opened files.
func (t *freezerTable) Close() error {
	t.lock.Lock()
	defer t.lock.Unlock()

	var errs []error
	if err := t.index.Close(); err != nil {
		errs = append(errs, err)
	}
	t.index = nil

	for _, f := range t.files {
		if err := f.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	t.head = nil

	if errs != nil {
		return fmt.Errorf("%v", errs)
	}
	return nil
}

// openFile assumes that the write-lock is held by the caller
func (t *freezerTable) openFile(num uint32, opener func(string) (*os.File, error)) (f *os.File, err error) {
	var exist bool
	if f, exist = t.files[num]; !exist {
		var name string
		if t.noCompression {
			name = fmt.Sprintf("%s.%04d.rdat", t.name, num)
		} else {
			name = fmt.Sprintf("%s.%04d.cdat", t.name, num)
		}
		f, err = opener(filepath.Join(t.path, name))
		if err != nil {
			return nil, err
		}
		t.files[num] = f
	}
	return f, err
}

// releaseFile closes a file, and removes it from the open file cache.
// Assumes that the caller holds the write lock
func (t *freezerTable) releaseFile(num uint32) {
	if f, exist := t.files[num]; exist {
		delete(t.files, num)
		f.Close()
	}
}

// releaseFilesAfter closes all open files with a higher number, and optionally also deletes the files
func (t *freezerTable) releaseFilesAfter(num uint32, remove bool) {
	for fnum, f := range t.files {
		if fnum > num {
			delete(t.files, fnum)
			f.Close()
			if remove {
				os.Remove(f.Name())
			}
		}
	}
}

// Append injects a binary blob at the end of the freezer table. The item number
// is a precautionary parameter to ensure data correctness, but the table will
// reject already existing data.
//
// Note, this method will *not* flush any data to disk so be sure to explicitly
// fsync before irreversibly deleting data from the database.
func (t *freezerTable) Append(item uint64, blob []byte) error {
	// Read lock prevents competition with truncate
	t.lock.RLock()
	// Ensure the table is still accessible
	if t.index == nil || t.head == nil {
		t.lock.RUnlock()
		return errClosed
	}
	// Ensure only the next item can be written, nothing else
	if atomic.LoadUint64(&t.items) != item {
		t.lock.RUnlock()
		return fmt.Errorf("appending unexpected item: want %d, have %d", t.items, item)
	}
	// Encode the blob and write it into the data file
	if !t.noCompression {
		blob = snappy.Encode(nil, blob)
	}
	bLen := uint32(len(blob))
	if t.headBytes+bLen < bLen ||
		t.headBytes+bLen > t.maxFileSize {
		// we need a new file, writing would overflow
		t.lock.RUnlock()
		t.lock.Lock()
		nextID := atomic.LoadUint32(&t.headId) + 1
		// We open the next file in truncated mode -- if this file already
		// exists, we need to start over from scratch on it
		newHead, err := t.openFile(nextID, openFreezerFileTruncated)
		if err != nil {
			t.lock.Unlock()
			return err
		}
		// Close old file, and reopen in RDONLY mode
		t.releaseFile(t.headId)
		t.openFile(t.headId, openFreezerFileForReadOnly)

		// Swap out the current head
		t.head = newHead
		atomic.StoreUint32(&t.headBytes, 0)
		atomic.StoreUint32(&t.headId, nextID)
		t.lock.Unlock()
		t.lock.RLock()
	}

	defer t.lock.RUnlock()
	if _, err := t.head.Write(blob); err != nil {
		return err
	}
	newOffset := atomic.AddUint32(&t.headBytes, bLen)
	idx := indexEntry{
		filenum: atomic.LoadUint32(&t.headId),
		offset:  newOffset,
	}
	// Write indexEntry
	t.index.Write(idx.marshallBinary())

	t.writeMeter.Mark(int64(bLen + indexEntrySize))
	t.sizeGauge.Inc(int64(bLen + indexEntrySize))

	atomic.AddUint64(&t.items, 1)
	return nil
}

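// The helper below is an illustrative restatement (added for this write-up, not
// part of the original file) of Append's head-file rollover test: a new data file
// is started either when the write would exceed maxFileSize or when the uint32
// byte counter itself would wrap around.
func exampleNeedsNewHeadFile(headBytes, blobLen, maxFileSize uint32) bool {
	return headBytes+blobLen < blobLen || headBytes+blobLen > maxFileSize
}
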
// getBounds looks up the index entries around the given item and returns the
// start offset, end offset and file number of its data.
func (t *freezerTable) getBounds(item uint64) (uint32, uint32, uint32, error) {
	var startIdx, endIdx indexEntry
	buffer := make([]byte, indexEntrySize)
	if _, err := t.index.ReadAt(buffer, int64(item*indexEntrySize)); err != nil {
		return 0, 0, 0, err
	}
	startIdx.unmarshalBinary(buffer)
	if _, err := t.index.ReadAt(buffer, int64((item+1)*indexEntrySize)); err != nil {
		return 0, 0, 0, err
	}
	endIdx.unmarshalBinary(buffer)
	if startIdx.filenum != endIdx.filenum {
		// If a piece of data 'crosses' a data-file boundary, it's actually stored
		// in one piece in the second data-file. We return a zero start offset for
		// the second file in that case.
		return 0, endIdx.offset, endIdx.filenum, nil
	}
	return startIdx.offset, endIdx.offset, endIdx.filenum, nil
}

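// A worked example of the cross-file case (added for illustration, hypothetical
// values): if entry N is {filenum: 1, offset: 980} and entry N+1 is
// {filenum: 2, offset: 50}, item N was written at the start of file 2, so the
// bounds are start=0, end=50 in file 2 rather than a range spanning both files.
func exampleCrossFileBounds() {
	prev := indexEntry{filenum: 1, offset: 980}
	next := indexEntry{filenum: 2, offset: 50}
	if prev.filenum != next.filenum {
		fmt.Printf("start=%d end=%d filenum=%d\n", 0, next.offset, next.filenum)
	}
}
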
// Retrieve looks up the data offset of an item with the given number and retrieves
// the raw binary blob from the data file.
func (t *freezerTable) Retrieve(item uint64) ([]byte, error) {
	// Ensure the table and the item are accessible
	if t.index == nil || t.head == nil {
		return nil, errClosed
	}
	if atomic.LoadUint64(&t.items) <= item {
		return nil, errOutOfBounds
	}
	// Ensure the item was not deleted from the tail either
	offset := atomic.LoadUint32(&t.itemOffset)
	if uint64(offset) > item {
		return nil, errOutOfBounds
	}
	t.lock.RLock()
	startOffset, endOffset, filenum, err := t.getBounds(item - uint64(offset))
	if err != nil {
		t.lock.RUnlock()
		return nil, err
	}
	dataFile, exist := t.files[filenum]
	if !exist {
		t.lock.RUnlock()
		return nil, fmt.Errorf("missing data file %d", filenum)
	}
	// Retrieve the data itself, decompress and return
	blob := make([]byte, endOffset-startOffset)
	if _, err := dataFile.ReadAt(blob, int64(startOffset)); err != nil {
		t.lock.RUnlock()
		return nil, err
	}
	t.lock.RUnlock()
	t.readMeter.Mark(int64(len(blob) + 2*indexEntrySize))

	if t.noCompression {
		return blob, nil
	}
	return snappy.Decode(nil, blob)
}

// has reports whether an item with the given number exists in the freezer table.
func (t *freezerTable) has(number uint64) bool {
	return atomic.LoadUint64(&t.items) > number
}

// size returns the total data size in the freezer table.
func (t *freezerTable) size() (uint64, error) {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.sizeNolock()
}

// sizeNolock returns the total data size in the freezer table without obtaining
// the mutex first.
func (t *freezerTable) sizeNolock() (uint64, error) {
	stat, err := t.index.Stat()
	if err != nil {
		return 0, err
	}
	total := uint64(t.maxFileSize)*uint64(t.headId-t.tailId) + uint64(t.headBytes) + uint64(stat.Size())
	return total, nil
}

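// Illustrative arithmetic for the estimate above (added for this write-up,
// hypothetical numbers): every data file below the head is counted at the full
// maxFileSize, so with two older files, a 2,000,000,000-byte file cap,
// 1,048,576 bytes in the head file and a 600-byte index, the reported size is
// 2*2,000,000,000 + 1,048,576 + 600 = 4,001,049,176 bytes.
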
// Sync pushes any pending data from memory out to disk. This is an expensive
// operation, so use it with care.
func (t *freezerTable) Sync() error {
	if err := t.index.Sync(); err != nil {
		return err
	}
	return t.head.Sync()
}

// printIndex is a debug print utility function for testing
func (t *freezerTable) printIndex() {
	buf := make([]byte, indexEntrySize)

	fmt.Printf("|-----------------|\n")
	fmt.Printf("| fileno | offset |\n")
	fmt.Printf("|--------+--------|\n")

	for i := uint64(0); ; i++ {
		if _, err := t.index.ReadAt(buf, int64(i*indexEntrySize)); err != nil {
			break
		}
		var entry indexEntry
		entry.unmarshalBinary(buf)
		fmt.Printf("|  %03d   |  %03d   | \n", entry.filenum, entry.offset)
		if i > 100 {
			fmt.Printf(" ... \n")
			break
		}
	}
	fmt.Printf("|-----------------|\n")
}