github.com/balzaczyy/golucene@v0.0.0-20151210033525-d0be9ee89713/core/index/flushQueue.go (about)

     1  package index
     2  
     3  import (
     4  	"container/list"
     5  	"fmt"
     6  	"sync"
     7  	"sync/atomic"
     8  )
     9  
// index/DocumentsWriterFlushQueue.java

// DocumentsWriterFlushQueue is a synchronized FIFO of flush tickets, ported
// from Lucene's DocumentsWriterFlushQueue. The embedded Locker guards the
// ticket queue itself; purgeLock serializes purge/publish passes so that
// publishing to the IndexWriter never blocks flushers appending new tickets.
type DocumentsWriterFlushQueue struct {
	sync.Locker
	queue *list.List
	// we track tickets separately since count must be present even
	// before the ticket is constructed, ie. queue.size would not
	// reflect it.
	_ticketCount int32 // accessed atomically
	purgeLock    sync.Locker
}
    21  
    22  func newDocumentsWriterFlushQueue() *DocumentsWriterFlushQueue {
    23  	return &DocumentsWriterFlushQueue{
    24  		Locker:    &sync.Mutex{},
    25  		queue:     list.New(),
    26  		purgeLock: &sync.Mutex{},
    27  	}
    28  }
    29  
// addDeletes enqueues a ticket for a flush that carries only deletes from
// the given delete queue (no new segment). NOTE(review): not yet ported —
// presumably mirrors Lucene's DocumentsWriterFlushQueue.addDeletes; any
// caller reaching this path panics.
func (fq *DocumentsWriterFlushQueue) addDeletes(deleteQueue *DocumentsWriterDeleteQueue) error {
	panic("not implemented yet")
}
    33  
    34  func (fq *DocumentsWriterFlushQueue) incTickets() {
    35  	assert(atomic.AddInt32(&fq._ticketCount, 1) > 0)
    36  }
    37  
    38  func (fq *DocumentsWriterFlushQueue) decTickets() {
    39  	assert(atomic.AddInt32(&fq._ticketCount, -1) >= 0)
    40  }
    41  
    42  func (fq *DocumentsWriterFlushQueue) addFlushTicket(dwpt *DocumentsWriterPerThread) *SegmentFlushTicket {
    43  	fq.Lock()
    44  	defer fq.Unlock()
    45  
    46  	// Each flush is assigned a ticket in the order they acquire the ticketQueue lock
    47  	fq.incTickets()
    48  	var success = false
    49  	defer func() {
    50  		if !success {
    51  			fq.decTickets()
    52  		}
    53  	}()
    54  
    55  	// prepare flush freezes the global deletes - do in synced block!
    56  	ticket := newSegmentFlushTicket(dwpt.prepareFlush())
    57  	fq.queue.PushBack(ticket)
    58  	success = true
    59  	return ticket
    60  }
    61  
    62  func (q *DocumentsWriterFlushQueue) addSegment(ticket *SegmentFlushTicket, segment *FlushedSegment) {
    63  	q.Lock()
    64  	defer q.Unlock()
    65  	// the actual flush is done asynchronously and once done the
    66  	// FlushedSegment is passed to the flush ticket
    67  	ticket.setSegment(segment)
    68  }
    69  
    70  func (fq *DocumentsWriterFlushQueue) markTicketFailed(ticket *SegmentFlushTicket) {
    71  	fq.Lock()
    72  	defer fq.Unlock()
    73  	// to free the queue we mark tickets as failed just to clean up the queue.
    74  	ticket.fail()
    75  }
    76  
    77  func (fq *DocumentsWriterFlushQueue) hasTickets() bool {
    78  	n := atomic.LoadInt32(&fq._ticketCount)
    79  	assertn(n >= 0, "ticketCount should be >= 0 but was: ", n)
    80  	return n != 0
    81  }
    82  
    83  func assertn(ok bool, msg string, args ...interface{}) {
    84  	if !ok {
    85  		panic(fmt.Sprintf(msg, args...))
    86  	}
    87  }
    88  
// _purge publishes every ready ticket at the head of the queue, in order,
// stopping at the first ticket that is not yet publishable or on the first
// publish error. Returns how many tickets were published. Caller must hold
// purgeLock; the queue lock is taken only briefly to peek/pop, so flushers
// can keep appending while a ticket is being published.
func (fq *DocumentsWriterFlushQueue) _purge(writer *IndexWriter) (numPurged int, err error) {
	for {
		// Peek (without removing) the head ticket under the queue lock.
		if head, canPublish := func() (FlushTicket, bool) {
			fq.Lock()
			defer fq.Unlock()
			if fq.queue.Len() > 0 {
				head := fq.queue.Front().Value.(FlushTicket)
				return head, head.canPublish()
			}
			return nil, false
		}(); canPublish {
			numPurged++
			if err = func() error {
				// Pop happens after publish (even on panic/error), again
				// under the queue lock; purgeLock guarantees nobody else
				// removed the head in between.
				defer func() {
					fq.Lock()
					defer fq.Unlock()
					// remove the published ticket from the queue
					e := fq.queue.Front()
					fq.queue.Remove(e)
					atomic.AddInt32(&fq._ticketCount, -1)
					assert(e.Value.(FlushTicket) == head)
				}()
				// if we block on publish -> lock IW -> lock BufferedUpdates,
				// we don't block concurrent segment flushes just because
				// they want to append to the queue. The down-side is that we
				// need to force a purge on fullFlush since there could be a
				// ticket still in the queue.
				return head.publish(writer)
			}(); err != nil {
				return
			}
		} else {
			break
		}
	}
	return
}
   126  
   127  func (fq *DocumentsWriterFlushQueue) forcePurge(writer *IndexWriter) (int, error) {
   128  	fq.purgeLock.Lock()
   129  	defer fq.purgeLock.Unlock()
   130  	return fq._purge(writer)
   131  }
   132  
   133  func (fq *DocumentsWriterFlushQueue) ticketCount() int {
   134  	return int(atomic.LoadInt32(&fq._ticketCount))
   135  }
   136  
// FlushTicket represents one flush whose outcome may arrive asynchronously.
// canPublish reports whether the outcome is available; publish hands the
// outcome over to the IndexWriter.
type FlushTicket interface {
	canPublish() bool
	publish(writer *IndexWriter) error
}
   141  
// FlushTicketImpl is the shared base of flush tickets: it carries the frozen
// global updates captured when the ticket was created, plus a flag that
// guards against publishing the same ticket twice.
type FlushTicketImpl struct {
	frozenUpdates *FrozenBufferedUpdates
	published     bool
}
   146  
   147  func newFlushTicket(frozenUpdates *FrozenBufferedUpdates) *FlushTicketImpl {
   148  	assert(frozenUpdates != nil)
   149  	return &FlushTicketImpl{frozenUpdates: frozenUpdates}
   150  }
   151  
   152  /*
   153  Publishes the flushed segment, segment private deletes (if any) and
   154  its associated global delete (if present) to IndexWriter. The actual
   155  publishing operation is syned on IW -> BDS so that the SegmentInfo's
   156  delete generation is always GlobalPacket_deleteGeneration + 1
   157  */
   158  func (t *FlushTicketImpl) publishFlushedSegment(indexWriter *IndexWriter,
   159  	newSegment *FlushedSegment, globalPacket *FrozenBufferedUpdates) error {
   160  	assert(newSegment != nil)
   161  	assert(newSegment.segmentInfo != nil)
   162  	segmentUpdates := newSegment.segmentUpdates
   163  	// fmt.Printf("FLUSH: %v\n", newSegment.segmentInfo.Name())
   164  	if is := indexWriter.infoStream; is.IsEnabled("DW") {
   165  		is.Message("DW", "publishFlushedSegment seg-private updates=%v", segmentUpdates)
   166  		if segmentUpdates != nil {
   167  			is.Message("DW", "flush: push buffered seg private updates: %v", segmentUpdates)
   168  		}
   169  	}
   170  	// now publish!
   171  	return indexWriter.publishFlushedSegment(newSegment.segmentInfo, segmentUpdates, globalPacket)
   172  }
   173  
   174  func (t *FlushTicketImpl) finishFlush(indexWriter *IndexWriter,
   175  	newSegment *FlushedSegment, bufferedUpdates *FrozenBufferedUpdates) error {
   176  	// Finish the flushed segment and publish it to IndexWriter
   177  	if newSegment == nil {
   178  		assert(bufferedUpdates != nil)
   179  		if bufferedUpdates != nil && bufferedUpdates.any() {
   180  			indexWriter.publishFrozenUpdates(bufferedUpdates)
   181  			if indexWriter.infoStream.IsEnabled("DW") {
   182  				indexWriter.infoStream.Message("DW", "flush: push buffered updates: %v", bufferedUpdates)
   183  			}
   184  		}
   185  		return nil
   186  	}
   187  	return t.publishFlushedSegment(indexWriter, newSegment, bufferedUpdates)
   188  }
   189  
// SegmentFlushTicket is the ticket for a DWPT segment flush. At most one of
// segment/failed is ever set (setSegment asserts !failed, fail asserts
// segment == nil); the ticket becomes publishable once either is set.
type SegmentFlushTicket struct {
	*FlushTicketImpl
	segment *FlushedSegment
	failed  bool
}
   195  
   196  func newSegmentFlushTicket(frozenUpdates *FrozenBufferedUpdates) *SegmentFlushTicket {
   197  	return &SegmentFlushTicket{
   198  		FlushTicketImpl: newFlushTicket(frozenUpdates),
   199  	}
   200  }
   201  
   202  func (ticket *SegmentFlushTicket) publish(writer *IndexWriter) error {
   203  	assertn(!ticket.published, "ticket was already publised - can not publish twice")
   204  	ticket.published = true
   205  	return ticket.finishFlush(writer, ticket.segment, ticket.frozenUpdates)
   206  }
   207  
   208  func (ticket *SegmentFlushTicket) setSegment(segment *FlushedSegment) {
   209  	assert(!ticket.failed)
   210  	ticket.segment = segment
   211  }
   212  
   213  func (ticket *SegmentFlushTicket) fail() {
   214  	assert(ticket.segment == nil)
   215  	ticket.failed = true
   216  }
   217  
   218  func (ticket *SegmentFlushTicket) canPublish() bool {
   219  	return ticket.segment != nil || ticket.failed
   220  }