github.com/zhiqiangxu/util@v0.0.0-20230112053021-0a7aee056cd5/diskqueue/diskqueue.go (about)

     1  package diskqueue
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"encoding/binary"
     7  	"errors"
     8  	"fmt"
     9  	"net"
    10  	"os"
    11  	"path/filepath"
    12  	"sync"
    13  	"time"
    14  
    15  	"sync/atomic"
    16  
    17  	"github.com/zhiqiangxu/util"
    18  	"github.com/zhiqiangxu/util/closer"
    19  	"github.com/zhiqiangxu/util/logger"
    20  	"github.com/zhiqiangxu/util/mapped"
    21  	"github.com/zhiqiangxu/util/wm"
    22  	"go.uber.org/zap"
    23  )
    24  
    25  // StreamBytes is bytes with offset info
    26  type StreamBytes struct {
    27  	Bytes  []byte
    28  	Offset int64
    29  }
    30  
    31  type queueInterface interface {
    32  	queueMetaROInterface
    33  	Put([]byte) (int64, error)
    34  	Read(ctx context.Context, offset int64) ([]byte, error)
    35  	StreamRead(ctx context.Context, offset int64) (<-chan StreamBytes, error)
    36  	StreamOffsetRead(offsetCh <-chan int64) (<-chan StreamBytes, error)
    37  	Close()
    38  	GC() (int, error)
    39  	Delete() error
    40  }
    41  
    42  var _ queueInterface = (*Queue)(nil)
    43  
    44  // Queue for diskqueue
    45  type Queue struct {
    46  	putting    int32
    47  	gcFlag     uint32
    48  	closeState uint32
    49  	closer     *closer.Naive
    50  	meta       *queueMeta
    51  	conf       Conf
    52  	writeCh    chan *writeRequest
    53  	writeReqs  []*writeRequest
    54  	writeBuffs net.Buffers
    55  	sizeBuffs  []byte
    56  	gcCh       chan *gcRequest
    57  	// guards files,minValidIndex
    58  	flock         sync.RWMutex
    59  	files         []*qfile
    60  	minValidIndex int
    61  	once          sync.Once
    62  	wm            *wm.Offset // maintains commit offset
    63  }
    64  
    65  const (
    66  	defaultWriteBatch      = 100
    67  	defaultMaxMsgSize      = 512 * 1024 * 1024
    68  	defaultMaxPutting      = 200000
    69  	defaultPersistDuration = 3 * 24 * time.Hour
    70  	sizeLength             = 4
    71  )
    72  
    73  // New is ctor for Queue
    74  func New(conf Conf) (q *Queue, err error) {
    75  	if conf.Directory == "" {
    76  		err = errEmptyDirectory
    77  		return
    78  	}
    79  	if conf.PersistDuration < time.Hour {
    80  		conf.PersistDuration = defaultPersistDuration
    81  	}
    82  	if conf.MaxFileSize <= 0 {
    83  		conf.MaxFileSize = qfileDefaultSize
    84  	}
    85  
    86  	if conf.WriteBatch <= 0 {
    87  		conf.WriteBatch = defaultWriteBatch
    88  	}
    89  	if conf.MaxMsgSize <= 0 {
    90  		conf.MaxMsgSize = defaultMaxMsgSize
    91  	}
    92  	if conf.MaxPutting <= 0 {
    93  		conf.MaxPutting = defaultMaxPutting
    94  	}
    95  	if conf.CustomDecoder != nil {
    96  		conf.customDecoder = true
    97  	}
    98  
    99  	q = &Queue{
   100  		closer:    closer.NewNaive(),
   101  		conf:      conf,
   102  		writeCh:   make(chan *writeRequest, conf.WriteBatch),
   103  		writeReqs: make([]*writeRequest, 0, conf.WriteBatch),
   104  		gcCh:      make(chan *gcRequest),
   105  		wm:        wm.NewOffset(),
   106  	}
   107  	if conf.customDecoder {
   108  		q.writeBuffs = make(net.Buffers, 0, conf.WriteBatch)
   109  		// q.sizeBuffs = nil
   110  	} else {
   111  		q.writeBuffs = make(net.Buffers, 0, conf.WriteBatch*2)
   112  		q.sizeBuffs = make([]byte, sizeLength*conf.WriteBatch)
   113  	}
   114  	q.meta = newQueueMeta(&q.conf)
   115  	err = q.init()
   116  	return
   117  }
   118  
   119  const (
   120  	dirPerm = 0770
   121  )
   122  
   123  // NumFiles is proxy for meta
   124  func (q *Queue) NumFiles() int {
   125  	return q.meta.NumFiles()
   126  }
   127  
   128  func (q *Queue) writeBufferPool() *sync.Pool {
   129  	if !q.conf.EnableWriteBuffer {
   130  		return nil
   131  	}
   132  	if q.conf.WriteBufferPool != nil {
   133  		return q.conf.WriteBufferPool
   134  	} else if q.conf.writeBufferPool != nil {
   135  		return q.conf.writeBufferPool
   136  	} else {
   137  		pool := &sync.Pool{
   138  			New: func() interface{} {
   139  				return bytes.NewBuffer(make([]byte, q.conf.MaxFileSize))
   140  			},
   141  		}
   142  		q.conf.writeBufferPool = pool
   143  		return pool
   144  	}
   145  }
   146  
   147  // Stat is proxy for meta
   148  func (q *Queue) Stat() QueueMeta {
   149  	return q.meta.Stat()
   150  }
   151  
   152  // FileMeta is proxy for meta
   153  func (q *Queue) FileMeta(idx int) FileMeta {
   154  	return q.meta.FileMeta(idx)
   155  }
   156  
   157  // init the queue
   158  func (q *Queue) init() (err error) {
   159  
   160  	// 确保各种目录存在
   161  	err = os.MkdirAll(filepath.Join(q.conf.Directory, qfSubDir), dirPerm)
   162  	if err != nil {
   163  		return
   164  	}
   165  
   166  	// 初始化元数据
   167  	err = q.meta.Init()
   168  	if err != nil {
   169  		return
   170  	}
   171  
   172  	// 加载qfile
   173  	stat := q.Stat()
   174  	nFiles := int(stat.FileCount)
   175  	q.minValidIndex = int(stat.MinValidIndex)
   176  	q.files = make([]*qfile, 0, nFiles-q.minValidIndex)
   177  	var qf *qfile
   178  	for i := q.minValidIndex; i < nFiles; i++ {
   179  		qf, err = openQfile(q, i, i == nFiles-1)
   180  		if err != nil {
   181  			return
   182  		}
   183  		if i < (nFiles - 1) {
   184  			err = qf.Shrink()
   185  			if err != nil {
   186  				return
   187  			}
   188  		}
   189  		q.files = append(q.files, qf)
   190  	}
   191  
   192  	// enough data, ready to go!
   193  
   194  	if len(q.files) == 0 {
   195  		err = q.createQfile()
   196  		if err != nil {
   197  			logger.Instance().Error("Init createQfile", zap.Error(err))
   198  			return
   199  		}
   200  	}
   201  
   202  	util.GoFunc(q.closer.WaitGroupRef(), q.handleWriteAndGC)
   203  	util.GoFunc(q.closer.WaitGroupRef(), q.handleCommit)
   204  
   205  	return nil
   206  }
   207  
   208  func (q *Queue) maxValidIndex() int {
   209  	return q.minValidIndex + len(q.files) - 1
   210  }
   211  
   212  func (q *Queue) nextIndex() int {
   213  	return q.minValidIndex + len(q.files)
   214  }
   215  
   216  func (q *Queue) createQfile() (err error) {
   217  	var qf *qfile
   218  	if len(q.files) == 0 {
   219  		qf, err = createQfile(q, 0, 0)
   220  		if err != nil {
   221  			return
   222  		}
   223  	} else {
   224  		qf = q.files[len(q.files)-1]
   225  		commitOffset := qf.DoneWrite()
   226  		q.wm.Done(commitOffset)
   227  		qf, err = createQfile(q, q.nextIndex(), qf.WrotePosition())
   228  		if err != nil {
   229  			return
   230  		}
   231  	}
   232  	q.flock.Lock()
   233  	q.files = append(q.files, qf)
   234  	q.flock.Unlock()
   235  	return
   236  }
   237  
   238  type writeResult struct {
   239  	offset int64
   240  }
   241  
   242  type writeRequest struct {
   243  	data   []byte
   244  	result chan writeResult
   245  }
   246  
   247  type gcResult struct {
   248  	n   int
   249  	err error
   250  }
   251  
   252  type gcRequest struct {
   253  	result chan gcResult
   254  }
   255  
   256  var wreqPool = sync.Pool{New: func() interface{} {
   257  	return &writeRequest{result: make(chan writeResult, 1)}
   258  }}
   259  
   260  // dedicated G so that write is serial
   261  func (q *Queue) handleWriteAndGC() {
   262  	var (
   263  		wReq           *writeRequest
   264  		gcReq          *gcRequest
   265  		qf             *qfile
   266  		err            error
   267  		wroteN, totalN int64
   268  		gcN            int
   269  	)
   270  
   271  	startFM := q.meta.FileMeta(q.maxValidIndex())
   272  	startWrotePosition := startFM.EndOffset
   273  
   274  	var (
   275  		updateWriteBufsFunc func(i int, data []byte)
   276  		actualSizeLength    int64
   277  	)
   278  	if q.conf.customDecoder {
   279  		updateWriteBufsFunc = func(i int, data []byte) {
   280  			q.writeBuffs = append(q.writeBuffs, data)
   281  		}
   282  	} else {
   283  		updateWriteBufsFunc = func(i int, data []byte) {
   284  			q.updateSizeBuf(i, len(data))
   285  			q.writeBuffs = append(q.writeBuffs, q.getSizeBuf(i))
   286  			q.writeBuffs = append(q.writeBuffs, wReq.data)
   287  		}
   288  		actualSizeLength = sizeLength
   289  	}
   290  
   291  	handleWriteFunc := func() {
   292  		// enough data, ready to go!
   293  		qf = q.files[len(q.files)-1]
   294  
   295  		writeBuffs := q.writeBuffs
   296  
   297  		util.TryUntilSuccess(func() bool {
   298  			wroteN, err = qf.writeBuffers(&q.writeBuffs)
   299  			totalN += wroteN
   300  			if err == mapped.ErrWriteBeyond {
   301  				// 写超了,需要新开文件
   302  				err = q.createQfile()
   303  				if err != nil {
   304  					logger.Instance().Error("handleWriteAndGC createQfile", zap.Error(err))
   305  				} else {
   306  					qf = q.files[len(q.files)-1]
   307  					wroteN, err = qf.writeBuffers(&q.writeBuffs)
   308  					totalN += wroteN
   309  				}
   310  			}
   311  			if err != nil {
   312  				logger.Instance().Error("handleWriteAndGC WriteTo", zap.Error(err))
   313  				return false
   314  			}
   315  			return true
   316  		}, time.Second)
   317  
   318  		q.meta.UpdateFileStat(q.maxValidIndex(), len(q.writeReqs), startWrotePosition+totalN, NowNano())
   319  		if !q.conf.EnableWriteBuffer {
   320  			q.wm.Done(startWrotePosition + totalN)
   321  		}
   322  
   323  		q.writeBuffs = writeBuffs
   324  
   325  		// 全部写入成功
   326  		for _, req := range q.writeReqs {
   327  			req.result <- writeResult{offset: startWrotePosition}
   328  			startWrotePosition += actualSizeLength + int64(len(req.data))
   329  		}
   330  		totalN = 0
   331  	}
   332  
   333  	for {
   334  		select {
   335  		case <-q.closer.ClosedSignal():
   336  			// drain writeCh before quit
   337  		DrainStart:
   338  			q.writeReqs = q.writeReqs[:0]
   339  			q.writeBuffs = q.writeBuffs[:0]
   340  		DrainLoop:
   341  			for i := 0; i < q.conf.WriteBatch; i++ {
   342  				select {
   343  				case wReq = <-q.writeCh:
   344  					q.writeReqs = append(q.writeReqs, wReq)
   345  					updateWriteBufsFunc(i, wReq.data)
   346  				default:
   347  					break DrainLoop
   348  				}
   349  			}
   350  
   351  			if len(q.writeReqs) > 0 {
   352  				handleWriteFunc()
   353  
   354  				if len(q.writeReqs) == q.conf.WriteBatch {
   355  					goto DrainStart
   356  				}
   357  			}
   358  
   359  			close(q.writeCh)
   360  
   361  			q.writeReqs = q.writeReqs[:0]
   362  			q.writeBuffs = q.writeBuffs[:0]
   363  
   364  			var ok bool
   365  		DrainLoopFinal:
   366  			for i := 0; i < q.conf.WriteBatch; i++ {
   367  				select {
   368  				case wReq, ok = <-q.writeCh:
   369  					if !ok {
   370  						break DrainLoopFinal
   371  					}
   372  					q.writeReqs = append(q.writeReqs, wReq)
   373  					updateWriteBufsFunc(i, wReq.data)
   374  				}
   375  			}
   376  
   377  			if len(q.writeReqs) > 0 {
   378  				handleWriteFunc()
   379  			}
   380  			return
   381  		case gcReq = <-q.gcCh:
   382  
   383  			gcN, err = q.gc()
   384  			gcReq.result <- gcResult{n: gcN, err: err}
   385  
   386  		case wReq = <-q.writeCh:
   387  			q.writeReqs = q.writeReqs[:0]
   388  			q.writeBuffs = q.writeBuffs[:0]
   389  
   390  			q.writeReqs = append(q.writeReqs, wReq)
   391  			updateWriteBufsFunc(0, wReq.data)
   392  
   393  			// collect more data
   394  		BatchLoop:
   395  			for i := 0; i < q.conf.WriteBatch-1; i++ {
   396  				select {
   397  				case wReq = <-q.writeCh:
   398  					q.writeReqs = append(q.writeReqs, wReq)
   399  					updateWriteBufsFunc(i+1, wReq.data)
   400  				default:
   401  					break BatchLoop
   402  				}
   403  			}
   404  
   405  			handleWriteFunc()
   406  		}
   407  	}
   408  }
   409  
   410  func (q *Queue) getSizeBuf(i int) []byte {
   411  	return q.sizeBuffs[sizeLength*i : sizeLength*i+sizeLength]
   412  }
   413  
   414  func (q *Queue) updateSizeBuf(i int, size int) {
   415  	binary.BigEndian.PutUint32(q.sizeBuffs[sizeLength*i:], uint32(size))
   416  }
   417  
   418  const (
   419  	commitMinimumInterval = 1
   420  )
   421  
   422  func (q *Queue) handleCommit() {
   423  	if !q.conf.EnableWriteBuffer {
   424  		return
   425  	}
   426  
   427  	interval := commitMinimumInterval
   428  	if q.conf.CommitInterval > commitMinimumInterval {
   429  		interval = q.conf.CommitInterval
   430  	}
   431  	ticker := time.NewTicker(time.Second * time.Duration(interval))
   432  
   433  	for {
   434  		select {
   435  		case <-ticker.C:
   436  			q.flock.RLock()
   437  			qf := q.files[len(q.files)-1]
   438  			q.flock.RUnlock()
   439  			commitOffset := qf.Commit()
   440  			q.wm.Done(commitOffset)
   441  		case <-q.closer.ClosedSignal():
   442  			return
   443  		}
   444  	}
   445  }
   446  
   447  // Put data to queue
   448  func (q *Queue) Put(data []byte) (offset int64, err error) {
   449  
   450  	if !q.conf.customDecoder && len(data) > q.conf.MaxMsgSize {
   451  		err = errMsgTooLarge
   452  		return
   453  	}
   454  
   455  	err = q.checkCloseState()
   456  	if err != nil {
   457  		return
   458  	}
   459  
   460  	putting := atomic.AddInt32(&q.putting, 1)
   461  	defer atomic.AddInt32(&q.putting, -1)
   462  	if int(putting) > q.conf.MaxPutting {
   463  		err = errMaxPutting
   464  		return
   465  	}
   466  
   467  	wreq := wreqPool.Get().(*writeRequest)
   468  	wreq.data = data
   469  	if len(wreq.result) > 0 {
   470  		<-wreq.result
   471  	}
   472  
   473  	select {
   474  	case q.writeCh <- wreq:
   475  		result := <-wreq.result
   476  		wreq.data = nil
   477  		wreqPool.Put(wreq)
   478  		offset = result.offset
   479  		return
   480  	case <-q.closer.ClosedSignal():
   481  		err = errAlreadyClosed
   482  		return
   483  	}
   484  
   485  }
   486  
   487  func (q *Queue) qfByIdx(idx int) *qfile {
   488  	fileIndex := idx - q.minValidIndex
   489  	if fileIndex < 0 {
   490  		return nil
   491  	}
   492  	return q.files[fileIndex]
   493  }
   494  
   495  // ReadFrom for read from offset
   496  func (q *Queue) Read(ctx context.Context, offset int64) (data []byte, err error) {
   497  	err = q.checkCloseState()
   498  	if err != nil {
   499  		return
   500  	}
   501  
   502  	idx := q.meta.LocateFile(offset)
   503  	if idx < 0 {
   504  		err = errInvalidOffset
   505  		return
   506  	}
   507  
   508  	q.flock.RLock()
   509  	qf := q.qfByIdx(idx)
   510  	if qf == nil {
   511  		q.flock.RUnlock()
   512  		err = errInvalidOffset
   513  		return
   514  	}
   515  
   516  	rfc := qf.IncrRef()
   517  
   518  	q.flock.RUnlock()
   519  
   520  	// already deleted
   521  	if rfc == 1 {
   522  		err = errInvalidOffset
   523  		return
   524  	}
   525  
   526  	defer qf.DecrRef()
   527  
   528  	data, err = qf.Read(ctx, offset)
   529  
   530  	return
   531  }
   532  
   533  // StreamRead for stream read
   534  func (q *Queue) StreamRead(ctx context.Context, offset int64) (chRet <-chan StreamBytes, err error) {
   535  	err = q.checkCloseState()
   536  	if err != nil {
   537  		return
   538  	}
   539  
   540  	idx := q.meta.LocateFile(offset)
   541  	if idx < 0 {
   542  		err = errInvalidOffset
   543  		return
   544  	}
   545  
   546  	q.flock.RLock()
   547  	qf := q.qfByIdx(idx)
   548  	if qf == nil {
   549  		q.flock.RUnlock()
   550  		err = errInvalidOffset
   551  		return
   552  	}
   553  
   554  	rfc := qf.IncrRef()
   555  
   556  	q.flock.RUnlock()
   557  
   558  	// already deleted
   559  	if rfc == 1 {
   560  		err = errInvalidOffset
   561  		return
   562  	}
   563  
   564  	defer qf.DecrRef()
   565  
   566  	ch := make(chan StreamBytes)
   567  	chRet = ch
   568  	util.GoFunc(q.closer.WaitGroupRef(), func() {
   569  		// close the channel when done
   570  		defer close(ch)
   571  
   572  		streamCtx, streamCancel := context.WithCancel(ctx)
   573  		defer streamCancel()
   574  
   575  		var streamWG sync.WaitGroup
   576  		util.GoFunc(&streamWG, func() {
   577  			for {
   578  				otherFile, _ := qf.StreamRead(streamCtx, offset, ch)
   579  				if !otherFile {
   580  					return
   581  				}
   582  				if idx < q.meta.NumFiles()-1 {
   583  					offset = qf.WrotePosition()
   584  					idx++
   585  					qf.DecrRef()
   586  					q.flock.RLock()
   587  					qf = q.qfByIdx(idx)
   588  					if qf == nil {
   589  						logger.Instance().Fatal("qfByIdx nil", zap.Int("idx", idx))
   590  					}
   591  					rfc := qf.IncrRef()
   592  					q.flock.RUnlock()
   593  					if rfc == 1 {
   594  						logger.Instance().Fatal("StreamRead rfc == 1", zap.Int("idx", idx))
   595  					}
   596  
   597  				} else {
   598  					return
   599  				}
   600  			}
   601  		})
   602  
   603  		select {
   604  		case <-q.closer.ClosedSignal():
   605  			streamCancel()
   606  		case <-ctx.Done():
   607  		}
   608  		streamWG.Wait()
   609  
   610  	})
   611  
   612  	return
   613  }
   614  
   615  type streamOffsetResp struct {
   616  	lastOffset int64
   617  	otherFile  bool
   618  	err        error
   619  }
   620  
   621  // StreamOffsetRead for continuous read by offset
   622  // close offsetCh to signal the end of read
   623  func (q *Queue) StreamOffsetRead(offsetCh <-chan int64) (chRet <-chan StreamBytes, err error) {
   624  	err = q.checkCloseState()
   625  	if err != nil {
   626  		return
   627  	}
   628  
   629  	ch := make(chan StreamBytes)
   630  	chRet = ch
   631  	util.GoFunc(q.closer.WaitGroupRef(), func() {
   632  		// close the channel when done
   633  		defer close(ch)
   634  
   635  		streamCtx, streamCancel := context.WithCancel(context.Background())
   636  		defer streamCancel()
   637  
   638  		var (
   639  			streamWG sync.WaitGroup
   640  			offset   int64
   641  			ok       bool
   642  			resp     *streamOffsetResp
   643  		)
   644  
   645  		respCh := make(chan *streamOffsetResp, 1)
   646  
   647  		select {
   648  		case offset, ok = <-offsetCh:
   649  			if !ok {
   650  				return
   651  			}
   652  			for {
   653  				idx := q.meta.LocateFile(offset)
   654  				if idx < 0 {
   655  					return
   656  				}
   657  				q.flock.RLock()
   658  				qf := q.qfByIdx(idx)
   659  				if qf == nil {
   660  					q.flock.RUnlock()
   661  					return
   662  				}
   663  
   664  				rfc := qf.IncrRef()
   665  
   666  				q.flock.RUnlock()
   667  
   668  				// already deleted
   669  				if rfc == 1 {
   670  					return
   671  				}
   672  
   673  				util.GoFunc(&streamWG, func() {
   674  					otherFile, lastOffset, err := qf.StreamOffsetRead(streamCtx, offset, offsetCh, ch)
   675  					respCh <- &streamOffsetResp{otherFile: otherFile, lastOffset: lastOffset, err: err}
   676  				})
   677  
   678  				quit := false
   679  				select {
   680  				case <-q.closer.ClosedSignal():
   681  					streamCancel()
   682  					streamWG.Wait()
   683  					quit = true
   684  				case resp = <-respCh:
   685  					if resp.otherFile {
   686  						offset = resp.lastOffset
   687  					} else {
   688  						quit = true
   689  					}
   690  				}
   691  
   692  				qf.DecrRef()
   693  				if quit {
   694  					return
   695  				}
   696  			}
   697  
   698  		case <-q.closer.ClosedSignal():
   699  			return
   700  		}
   701  	})
   702  
   703  	return
   704  }
   705  
   706  var (
   707  	errEmptyDirectory = errors.New("directory empty")
   708  	errAlreadyClosed  = errors.New("already closed")
   709  	errAlreadyClosing = errors.New("already closing")
   710  	errMsgTooLarge    = errors.New("msg too large")
   711  	errMaxPutting     = errors.New("too much putting")
   712  	errInvalidOffset  = errors.New("invalid offset")
   713  	errOffsetChClosed = errors.New("offsetCh closed")
   714  )
   715  
   716  const (
   717  	open uint32 = iota
   718  	closing
   719  	closed
   720  )
   721  
   722  func (q *Queue) checkCloseState() (err error) {
   723  	closeState := atomic.LoadUint32(&q.closeState)
   724  	switch closeState {
   725  	case open:
   726  	case closing:
   727  		err = errAlreadyClosing
   728  	case closed:
   729  		err = errAlreadyClosed
   730  	default:
   731  		err = fmt.Errorf("unknown close state:%d", closeState)
   732  	}
   733  	return
   734  }
   735  
   736  // Close the queue
   737  func (q *Queue) Close() {
   738  
   739  	q.once.Do(func() {
   740  		atomic.StoreUint32(&q.closeState, closing)
   741  
   742  		q.closer.SignalAndWait()
   743  
   744  		util.TryUntilSuccess(func() bool {
   745  			// try until success
   746  			err := q.meta.Close()
   747  			if err != nil {
   748  				logger.Instance().Error("meta.Close", zap.Error(err))
   749  				return false
   750  			}
   751  
   752  			return true
   753  			// need human interfere
   754  
   755  		}, time.Second)
   756  
   757  		for _, file := range q.files {
   758  			err := file.Close()
   759  			if err != nil {
   760  				logger.Instance().Error("file.Close", zap.Error(err))
   761  			}
   762  		}
   763  		atomic.StoreUint32(&q.closeState, closed)
   764  	})
   765  
   766  	return
   767  }
   768  
   769  var (
   770  	// ErrGCing when already gc
   771  	ErrGCing = errors.New("already CGing")
   772  )
   773  
   774  // GC removes expired qfiles
   775  func (q *Queue) GC() (n int, err error) {
   776  	err = q.checkCloseState()
   777  	if err != nil {
   778  		return
   779  	}
   780  
   781  	swapped := atomic.CompareAndSwapUint32(&q.gcFlag, 0, 1)
   782  	if !swapped {
   783  		err = ErrGCing
   784  		return
   785  	}
   786  	defer atomic.StoreUint32(&q.gcFlag, 0)
   787  
   788  	gcReq := &gcRequest{result: make(chan gcResult, 1)}
   789  
   790  	select {
   791  	case q.gcCh <- gcReq:
   792  		select {
   793  		case <-q.closer.ClosedSignal():
   794  			err = errAlreadyClosed
   795  			return
   796  		case gcResult := <-gcReq.result:
   797  			n, err = gcResult.n, gcResult.err
   798  			return
   799  		}
   800  	case <-q.closer.ClosedSignal():
   801  		err = errAlreadyClosed
   802  		return
   803  	}
   804  }
   805  
   806  func (q *Queue) gc() (n int, err error) {
   807  	stat := q.Stat()
   808  	maxIdx := q.NumFiles() - 1
   809  	idx := int(stat.MinValidIndex)
   810  
   811  	for {
   812  		if idx >= maxIdx {
   813  			return
   814  		}
   815  		fileMeta := q.FileMeta(idx)
   816  
   817  		fileEndTime := time.Unix(0, fileMeta.EndTime)
   818  		if time.Now().Sub(fileEndTime) < q.conf.PersistDuration {
   819  			return
   820  		}
   821  
   822  		// can GC
   823  
   824  		q.flock.Lock()
   825  
   826  		qf := q.qfByIdx(idx)
   827  		q.minValidIndex = idx + 1
   828  		q.files[0] = nil
   829  		q.files = q.files[1:]
   830  
   831  		q.flock.Unlock()
   832  
   833  		q.meta.UpdateMinValidIndex(uint32(idx))
   834  		qf.DecrRef()
   835  
   836  		idx++
   837  		n++
   838  
   839  	}
   840  
   841  }
   842  
   843  // Delete the queue
   844  func (q *Queue) Delete() error {
   845  	q.Close()
   846  	return os.RemoveAll(q.conf.Directory)
   847  }