github.com/balzaczyy/golucene@v0.0.0-20151210033525-d0be9ee89713/core/index/invertedDocConsumer.go (about)

package index

import (
	. "github.com/balzaczyy/golucene/core/index/model"
	"github.com/balzaczyy/golucene/core/util"
)

// type InvertedDocConsumer interface {
// 	// Abort (called after hitting abort error)
// 	abort()
// 	addField(*DocInverterPerField, *model.FieldInfo) InvertedDocConsumerPerField
// 	// Flush a new segment
// 	flush(map[string]InvertedDocConsumerPerField, *model.SegmentWriteState) error
// 	startDocument()
// 	finishDocument() error
// }

/*
TermsHash is passed each token produced by the analyzer on each field
during indexing; it stores these tokens in a hash table and allocates
separate byte streams per token. Consumers of this class, e.g.
FreqProxTermsWriter and TermVectorsConsumer, write their own byte
streams under each term.
*/
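// In the typical indexing chain (mirroring Lucene), a FreqProxTermsWriter is
// the primary TermsHash and a TermVectorsConsumer is its nextTermsHash, so
// both consume the same tokens while sharing one term byte pool. A minimal
// sketch of that wiring; the constructor names below are assumptions for
// illustration only, not necessarily the ones used elsewhere in this package:
//
//	termVectors := newTermVectorsConsumer(docWriter)            // hypothetical constructor
//	termsHash := newFreqProxTermsWriter(docWriter, termVectors) // hypothetical constructor
//	termsHash.startDocument()
//	// ...per-field consumers add this document's tokens...
//	if err := termsHash.finishDocument(); err != nil {
//		// handle the error
//	}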
type TermsHash interface {
	startDocument()
	finishDocument() error
	abort()
	reset()
	setTermBytePool(*util.ByteBlockPool)
	flush(map[string]TermsHashPerField, *SegmentWriteState) error

	fields() *TermsHashImpl // workaround abstract class

	TermsHashImplSPI
}

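// TermsHashImplSPI is the hook each concrete TermsHash implementation must
// supply: creating the per-field consumer for a given field.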
type TermsHashImplSPI interface {
	addField(*FieldInvertState, *FieldInfo) TermsHashPerField
}

type TermsHashImpl struct {
	spi TermsHashImplSPI

	nextTermsHash TermsHash

	intPool      *util.IntBlockPool
	bytePool     *util.ByteBlockPool
	termBytePool *util.ByteBlockPool
	bytesUsed    util.Counter

	docState *docState

	trackAllocations bool
}

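// newTermsHash wires up the shared block pools for a TermsHash
// implementation. When trackAllocations is true it reuses the
// DocumentsWriterPerThread's bytesUsed counter, otherwise it creates a
// private one; if a nextTermsHash is chained, both consumers share this
// instance's byte pool as their term byte pool.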
func newTermsHash(spi TermsHashImplSPI,
	docWriter *DocumentsWriterPerThread,
	trackAllocations bool, nextTermsHash TermsHash) *TermsHashImpl {

	ans := &TermsHashImpl{
		spi:              spi,
		docState:         docWriter.docState,
		trackAllocations: trackAllocations,
		nextTermsHash:    nextTermsHash,
		intPool:          util.NewIntBlockPool(docWriter.intBlockAllocator),
		bytePool:         util.NewByteBlockPool(docWriter.byteBlockAllocator),
	}
	if trackAllocations {
		ans.bytesUsed = docWriter._bytesUsed
	} else {
		ans.bytesUsed = util.NewCounter()
	}
	if nextTermsHash != nil {
		ans.termBytePool = ans.bytePool
		nextTermsHash.setTermBytePool(ans.bytePool)
	}
	return ans
}

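// fields returns the embedded TermsHashImpl itself; it is a workaround for
// the abstract-class pattern, letting callers reach the shared state through
// the TermsHash interface.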
func (h *TermsHashImpl) fields() *TermsHashImpl {
	return h
}

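// setTermBytePool lets a parent TermsHash share its byte pool, so chained
// consumers read and write term bytes from the same pool.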
func (hash *TermsHashImpl) setTermBytePool(p *util.ByteBlockPool) {
	hash.termBytePool = p
}

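// abort discards all buffered state; the deferred call ensures the chained
// TermsHash is aborted even if reset panics.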
func (hash *TermsHashImpl) abort() {
	defer func() {
		if hash.nextTermsHash != nil {
			hash.nextTermsHash.abort()
		}
	}()
	hash.reset()
}

/* Clear all state */
func (hash *TermsHashImpl) reset() {
	// we don't reuse so we drop everything and don't fill with 0
	hash.intPool.Reset(false, false)
	hash.bytePool.Reset(false, false)
}

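// flush maps each field to its chained per-field consumer and forwards the
// flush to the next TermsHash in the chain, if there is one.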
func (hash *TermsHashImpl) flush(fieldsToFlush map[string]TermsHashPerField,
	state *SegmentWriteState) error {

	if hash.nextTermsHash != nil {
		nextChildFields := make(map[string]TermsHashPerField)
		for k, v := range fieldsToFlush {
			nextChildFields[k] = v.next()
		}
		return hash.nextTermsHash.flush(nextChildFields, state)
	}
	return nil
}

// func (h *TermsHash) addField(docInverterPerField *DocInverterPerField,
// 	fieldInfo *model.FieldInfo) InvertedDocConsumerPerField {
// 	return newTermsHashPerField(docInverterPerField, h, h.nextTermsHash, fieldInfo)
// }

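// finishDocument forwards the end-of-document call to the chained TermsHash,
// if any.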
func (h *TermsHashImpl) finishDocument() error {
	if h.nextTermsHash != nil {
		return h.nextTermsHash.finishDocument()
	}
	return nil
}

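// startDocument forwards the start-of-document call to the chained TermsHash,
// if any.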
func (h *TermsHashImpl) startDocument() {
	if h.nextTermsHash != nil {
		h.nextTermsHash.startDocument()
	}
}