github.com/balzaczyy/golucene@v0.0.0-20151210033525-d0be9ee89713/core/index/readerPool.go

package index

import (
	. "github.com/balzaczyy/golucene/core/codec/spi"
	"strings"
	"sync"
)

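// ReaderPool holds the shared, reference-counted ReadersAndUpdates entries
// for the segments an IndexWriter is working on, mirroring Lucene's
// IndexWriter.ReaderPool: pooled segment readers are reused for applying
// deletes, for merges, and for near-real-time readers.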
type ReaderPool struct {
	owner *IndexWriter
	sync.Locker
	readerMap map[*SegmentCommitInfo]*ReadersAndUpdates
}

func newReaderPool(owner *IndexWriter) *ReaderPool {
	return &ReaderPool{
		owner:     owner,
		Locker:    &sync.Mutex{},
		readerMap: make(map[*SegmentCommitInfo]*ReadersAndUpdates),
	}
}

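// infoIsLive reports whether info is still referenced by the writer's
// in-memory SegmentInfos; in Lucene it backs the assertions further below.
// Not ported yet.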
func (pool *ReaderPool) infoIsLive(info *SegmentCommitInfo) bool {
	panic("not implemented yet")
}

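// drop is meant to evict info's entry from the pool and drop its readers
// without saving any pending changes, as in Lucene's ReaderPool.drop.
// Not ported yet.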
func (pool *ReaderPool) drop(info *SegmentCommitInfo) error {
	pool.Lock()
	defer pool.Unlock()
	panic("not implemented yet")
}

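// release gives back a reference obtained from get(info, true). In Lucene's
// counterpart this decrefs the readers and, when the writer is not pooling
// readers, writes pending live docs and evicts the entry. Not ported yet.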
func (pool *ReaderPool) release(rld *ReadersAndUpdates) error {
	panic("not implemented yet")
}

func (pool *ReaderPool) Close() error {
	return pool.dropAll(false)
}

// Removes all our references to readers; when doSave is true, any pending changes are committed first.
func (pool *ReaderPool) dropAll(doSave bool) error {
	pool.Lock() // synchronized
	defer pool.Unlock()

	var priorE error
	for len(pool.readerMap) > 0 {
		for k, rld := range pool.readerMap {
			if doSave {
				ok, err := rld.writeLiveDocs(pool.owner.directory)
				if err != nil {
					return err
				}
				if ok {
					// Make sure we only write del docs and field updates for a live segment:
					assert(pool.infoIsLive(rld.info))
					// Must checkpoint because we just
					// created new _X_N.del and field updates files;
					// don't call IW.checkpoint because that also
					// increments SIS.version, which we do not want to
					// do here: it was done previously (after we
					// invoked BDS.applyDeletes), whereas here all we
					// did was move the state to disk:
					err = pool.owner.checkpointNoSIS()
					if err != nil {
						return err
					}
				}
			}

			// Important to remove as-we-go, not with .clear()
			// in the end, in case we hit an exception;
			// otherwise we could over-decref if close() is
			// called again:
			delete(pool.readerMap, k)

			// NOTE: it is allowed that these decRefs do not
			// actually close the SRs; this happens when a
			// near real-time reader is kept open after the
			// IndexWriter instance is closed:
			err := rld.dropReaders()
			if err != nil {
				if doSave {
					return err
				}
				if priorE == nil {
					priorE = err
				}
			}
		}
	}
	assert(len(pool.readerMap) == 0)
	return priorE
}

/* Commits live docs changes for the segment readers of the provided infos. */
func (pool *ReaderPool) commit(infos *SegmentInfos) error {
	pool.Lock() // synchronized
	defer pool.Unlock()

	for _, info := range infos.Segments {
		if rld, ok := pool.readerMap[info]; ok {
			assert(rld.info == info)
			ok, err := rld.writeLiveDocs(pool.owner.directory)
			if err != nil {
				return err
			}
			if ok {
				// Make sure we only write del docs for a live segment:
				assert(pool.infoIsLive(info))
				// Must checkpoint because we just created new _X_N.del and
				// field updates files; don't call IW.checkpoint because that
				// also increments SIS.version, which we do not want to do
				// here: it was done previously (after we invoked
				// BDS.applyDeletes), whereas here all we did was move the
				// state to disk:
				err = pool.owner.checkpointNoSIS()
				if err != nil {
					return err
				}
			}
		}
	}
	return nil
}

/*
Obtain a ReadersAndUpdates instance from the ReaderPool. If
create is true, you must later call release().
*/
func (pool *ReaderPool) get(info *SegmentCommitInfo, create bool) *ReadersAndUpdates {
	pool.Lock() // synchronized
	defer pool.Unlock()

	assertn(info.Info.Dir == pool.owner.directory, "info.dir=%v vs %v", info.Info.Dir, pool.owner.directory)

	rld, ok := pool.readerMap[info]
	if !ok {
		if !create {
			return nil
		}
		rld = newReadersAndUpdates(pool.owner, info)
		// Steal initial reference:
		pool.readerMap[info] = rld
	} else {
		assertn(rld.info == info, "rld.info=%v info=%v isLive?= %v vs %v",
			rld.info, info, pool.infoIsLive(rld.info), pool.infoIsLive(info))
	}

	if create {
		// Return ref to caller:
		rld.incRef()
	}

	assert(pool.noDups())

	return rld
}
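
// Illustrative sketch only, not part of the ported API: the acquire/release
// pattern implied by get's contract (create=true obliges the caller to call
// release later). The function name is made up for illustration.
func exampleGetAndRelease(pool *ReaderPool, info *SegmentCommitInfo) error {
	rld := pool.get(info, true) // create=true: the caller now holds a reference
	defer pool.release(rld)     // hand the reference back once done
	// ... use rld here, e.g. to read or update per-segment live docs ...
	return nil
}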

/* Make sure that every segment appears only once in the pool: */
func (pool *ReaderPool) noDups() bool {
	seen := make(map[string]bool)
	for info := range pool.readerMap {
		assert(!seen[info.Info.Name])
		seen[info.Info.Name] = true
	}
	return true
}

/*
Obtain the number of deleted docs for a pooled reader. If the reader
isn't being pooled, the segmentInfo's delCount is returned.
*/
func (pool *ReaderPool) numDeletedDocs(info *SegmentCommitInfo) int {
	// ensureOpen(false)
	delCount := info.DelCount()
	if rld := pool.get(info, false); rld != nil {
		delCount += rld.pendingDeleteCount()
	}
	return delCount
}

/*
Returns a string description of the specified segments, for debugging.
*/
func (pool *ReaderPool) segmentsToString(infos []*SegmentCommitInfo) string {
	// TODO synchronized
	var parts []string
	for _, info := range infos {
		parts = append(parts, pool.segmentToString(info))
	}
	return strings.Join(parts, " ")
}

/*
Returns a string description of the specified segment, for debugging.
*/
func (pool *ReaderPool) segmentToString(info *SegmentCommitInfo) string {
	// TODO synchronized
	return info.StringOf(info.Info.Dir, pool.numDeletedDocs(info)-info.DelCount())
}