github.com/keltia/go-ipfs@v0.3.8-0.20150909044612-210793031c63/blockservice/worker/worker.go

// Package worker provides a background worker that announces newly added
// blocks to an exchange without blocking the clients that add them.
package worker

import (
	"container/list"
	"errors"
	"time"

	process "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess"
	procctx "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context"
	ratelimit "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/ratelimit"
	blocks "github.com/ipfs/go-ipfs/blocks"
	key "github.com/ipfs/go-ipfs/blocks/key"
	exchange "github.com/ipfs/go-ipfs/exchange"
	util "github.com/ipfs/go-ipfs/util"
)

var log = util.Logger("blockservice")

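// DefaultConfig runs a single worker with unbuffered client and worker
// channels.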
var DefaultConfig = Config{
	NumWorkers:       1,
	ClientBufferSize: 0,
	WorkerBufferSize: 0,
}

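// Config tunes the worker's concurrency and channel buffering.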
type Config struct {
	// NumWorkers sets the number of background workers that provide blocks
	// to the exchange.
	NumWorkers int

	// ClientBufferSize allows clients of HasBlock to send up to
	// |ClientBufferSize| blocks without blocking.
	ClientBufferSize int

	// WorkerBufferSize can be used in conjunction with NumWorkers to
	// reduce coordination overhead between the queue goroutine and the
	// workers.
	WorkerBufferSize int
}

// Worker accepts blocks from clients and announces them to the exchange
// in the background.
type Worker struct {
	// added accepts blocks from clients
	added    chan *blocks.Block
	exchange exchange.Interface

	// process manages the life-cycle of the worker's goroutines
	process process.Process
}

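// NewWorker starts a worker that hands blocks added via HasBlock to the
// given exchange. A minimal usage sketch (assuming ex implements
// exchange.Interface and blk is a *blocks.Block to announce):
//
//	w := NewWorker(ex, DefaultConfig)
//	defer w.Close()
//	if err := w.HasBlock(blk); err != nil {
//		// the worker was closed before the block could be queued
//	}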
func NewWorker(e exchange.Interface, c Config) *Worker {
	if c.NumWorkers < 1 {
		c.NumWorkers = 1 // provide a sane default
	}
	w := &Worker{
		exchange: e,
		added:    make(chan *blocks.Block, c.ClientBufferSize),
		process:  process.WithParent(process.Background()), // internal management
	}
	w.start(c)
	return w
}

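// HasBlock queues b for announcement to the exchange. It returns an
// error if the worker has already been closed.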
func (w *Worker) HasBlock(b *blocks.Block) error {
	select {
	case <-w.process.Closed():
		return errors.New("blockservice worker is closed")
	case w.added <- b:
		return nil
	}
}

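// Close shuts the worker down and releases its goroutines.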
func (w *Worker) Close() error {
	log.Debug("blockservice provide worker is shutting down...")
	return w.process.Close()
}

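// start wires up the worker's pipeline: a single goroutine drains
// |w.added| into a deduplicating FIFO queue (owned by that goroutine
// alone), while up to |c.NumWorkers| rate-limited goroutines take blocks
// off the queue and call exchange.HasBlock on them.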
func (w *Worker) start(c Config) {

	workerChan := make(chan *blocks.Block, c.WorkerBufferSize)

	// clientWorker handles incoming blocks from |w.added| and sends to
	// |workerChan|. This will never block the client.
	w.process.Go(func(proc process.Process) {
		defer close(workerChan)

		var workQueue BlockList
		debugInfo := time.NewTicker(5 * time.Second)
		defer debugInfo.Stop()
		for {

			// take advantage of the fact that sending on a nil channel
			// always blocks, so a message is only sent if a block exists
			sendToWorker := workerChan
			nextBlock := workQueue.Pop()
			if nextBlock == nil {
				sendToWorker = nil
			}

			select {

			// if a worker is ready and there's a block to process, send
			// the block
			case sendToWorker <- nextBlock:
			case <-debugInfo.C:
				if workQueue.Len() > 0 {
					log.Debugf("%d blocks in blockservice provide queue...", workQueue.Len())
				}
			case block := <-w.added:
				if nextBlock != nil {
					workQueue.Push(nextBlock) // missed the chance to send it
				}
				// if the client sends another block, add it to the queue.
				workQueue.Push(block)
			case <-proc.Closing():
				return
			}
		}
	})

	// reads from |workerChan| until w.process closes
	limiter := ratelimit.NewRateLimiter(w.process, c.NumWorkers)
	limiter.Go(func(proc process.Process) {
		ctx := procctx.OnClosingContext(proc) // shut down in-progress HasBlock when it's time to die
		for {
			select {
			case <-proc.Closing():
				return
			case block, ok := <-workerChan:
				if !ok {
					return
				}
				limiter.LimitedGo(func(proc process.Process) {
					if err := w.exchange.HasBlock(ctx, block); err != nil {
						log.Infof("blockservice worker error: %s", err)
					}
				})
			}
		}
	})
}

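// BlockList is a FIFO queue of blocks that ignores duplicates: a block
// whose key is already queued is not added again.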
type BlockList struct {
	list    list.List
	uniques map[key.Key]*list.Element
}

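// PushFront adds b to the head of the queue unless its key is already
// present.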
func (s *BlockList) PushFront(b *blocks.Block) {
	if s.uniques == nil {
		s.uniques = make(map[key.Key]*list.Element)
	}
	_, ok := s.uniques[b.Key()]
	if !ok {
		e := s.list.PushFront(b)
		s.uniques[b.Key()] = e
	}
}

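// Push adds b to the tail of the queue unless its key is already present.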
func (s *BlockList) Push(b *blocks.Block) {
	if s.uniques == nil {
		s.uniques = make(map[key.Key]*list.Element)
	}
	_, ok := s.uniques[b.Key()]
	if !ok {
		e := s.list.PushBack(b)
		s.uniques[b.Key()] = e
	}
}

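// Pop removes and returns the block at the head of the queue, or nil if
// the queue is empty.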
func (s *BlockList) Pop() *blocks.Block {
	if s.list.Len() == 0 {
		return nil
	}
	e := s.list.Front()
	s.list.Remove(e)
	b := e.Value.(*blocks.Block)
	delete(s.uniques, b.Key())
	return b
}

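// Len returns the number of blocks currently in the queue.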
func (s *BlockList) Len() int {
	return s.list.Len()
}