github.com/vincentwoo/docker@v0.7.3-0.20160116130405-82401a4b13c0/distribution/xfer/transfer.go

package xfer

import (
	"sync"

	"github.com/docker/docker/pkg/progress"
	"golang.org/x/net/context"
)

// DoNotRetry is an error wrapper indicating that the error cannot be resolved
// with a retry.
type DoNotRetry struct {
	Err error
}

// Error returns the stringified representation of the encapsulated error.
func (e DoNotRetry) Error() string {
	return e.Err.Error()
}
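
// Illustrative usage sketch: a retry loop can check for DoNotRetry to give
// up immediately instead of retrying. attempt is a hypothetical placeholder
// for the fallible operation.
//
//	if err := attempt(); err != nil {
//		if xerr, ok := err.(DoNotRetry); ok {
//			return xerr.Err // permanent failure; do not retry
//		}
//		// otherwise back off and retry
//	}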

// Watcher is returned by Watch and can be passed to Release to stop watching.
type Watcher struct {
	// signalChan is used to signal to the watcher goroutine that
	// new progress information is available, or that the transfer
	// has finished.
	signalChan chan struct{}
	// releaseChan signals to the watcher goroutine that the watcher
	// should be detached.
	releaseChan chan struct{}
	// running remains open as long as the watcher is watching the
	// transfer. It gets closed if the transfer finishes or the
	// watcher is detached.
	running chan struct{}
}

// Transfer represents an in-progress transfer.
type Transfer interface {
	Watch(progressOutput progress.Output) *Watcher
	Release(*Watcher)
	Context() context.Context
	Cancel()
	Done() <-chan struct{}
	Released() <-chan struct{}
	Broadcast(masterProgressChan <-chan progress.Progress)
}
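
// Illustrative lifecycle sketch: every Watch must be paired with a later
// Release, and Done only unblocks once the transfer function has
// acknowledged completion or cancellation. ctx here is a hypothetical
// caller-side context.
//
//	w := t.Watch(progressOutput)
//	select {
//	case <-t.Done():
//	case <-ctx.Done():
//		t.Cancel()
//		<-t.Done()
//	}
//	t.Release(w)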

type transfer struct {
	mu sync.Mutex

	ctx    context.Context
	cancel context.CancelFunc

	// watchers keeps track of the goroutines monitoring progress output,
	// indexed by the channels that release them.
	watchers map[chan struct{}]*Watcher

	// lastProgress is the most recently received progress event.
	lastProgress progress.Progress
	// hasLastProgress is true when lastProgress has been set.
	hasLastProgress bool

	// running remains open as long as the transfer is in progress.
	running chan struct{}
	// hasWatchers stays open until all watchers release the transfer.
	hasWatchers chan struct{}

	// broadcastDone is true if the master progress channel has closed.
	broadcastDone bool
	// broadcastSyncChan allows watchers to "ping" the broadcasting
	// goroutine to wait for it to deplete its input channel. This ensures
	// a detaching watcher won't miss an event that was sent before it
	// started detaching.
	broadcastSyncChan chan struct{}
}

// NewTransfer creates a new transfer.
func NewTransfer() Transfer {
	t := &transfer{
		watchers:          make(map[chan struct{}]*Watcher),
		running:           make(chan struct{}),
		hasWatchers:       make(chan struct{}),
		broadcastSyncChan: make(chan struct{}),
	}

	// This uses context.Background instead of a caller-supplied context
	// so that a transfer won't be cancelled automatically if the client
	// which requested it is ^C'd (there could be other viewers).
	t.ctx, t.cancel = context.WithCancel(context.Background())

	return t
}
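
// Illustrative cancellation sketch: because the context above is detached
// from any caller, it is cancelled either explicitly via Cancel, or
// implicitly when the last watcher is released (see Release below):
//
//	t := NewTransfer()
//	w := t.Watch(progressOutput)
//	// ... other clients may Watch and Release concurrently ...
//	t.Release(w) // last watcher gone: t.Context().Done() fires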

// Broadcast copies the progress and error output to all viewers.
func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) {
	for {
		var (
			p  progress.Progress
			ok bool
		)
		select {
		case p, ok = <-masterProgressChan:
		default:
			// We've depleted the channel, so now we can handle
			// reads on broadcastSyncChan to let detaching watchers
			// know we're caught up.
			select {
			case <-t.broadcastSyncChan:
				continue
			case p, ok = <-masterProgressChan:
			}
		}

		t.mu.Lock()
		if ok {
			t.lastProgress = p
			t.hasLastProgress = true
			for _, w := range t.watchers {
				select {
				case w.signalChan <- struct{}{}:
				default:
				}
			}
		} else {
			t.broadcastDone = true
		}
		t.mu.Unlock()
		if !ok {
			close(t.running)
			return
		}
	}
}
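
// Note on the drain handshake above (illustrative commentary): the nested
// select reaches broadcastSyncChan only via the outer default case, i.e.
// once masterProgressChan has been drained. A detaching watcher that
// completes a send on broadcastSyncChan therefore knows lastProgress
// already reflects every event sent before it began detaching.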

// Watch adds a watcher to the transfer. The supplied progress.Output
// receives progress updates until the transfer finishes or the watcher is
// released.
func (t *transfer) Watch(progressOutput progress.Output) *Watcher {
	t.mu.Lock()
	defer t.mu.Unlock()

	w := &Watcher{
		releaseChan: make(chan struct{}),
		signalChan:  make(chan struct{}),
		running:     make(chan struct{}),
	}

	if t.broadcastDone {
		close(w.running)
		return w
	}

	t.watchers[w.releaseChan] = w

	go func() {
		defer func() {
			close(w.running)
		}()
		done := false
		for {
			t.mu.Lock()
			hasLastProgress := t.hasLastProgress
			lastProgress := t.lastProgress
			t.mu.Unlock()

			// This might write the last progress item a
			// second time (since channel closure also gets
			// us here), but that's fine.
			if hasLastProgress {
				progressOutput.WriteProgress(lastProgress)
			}

			if done {
				return
			}

			select {
			case <-w.signalChan:
			case <-w.releaseChan:
				done = true
				// Since the watcher is going to detach, make
				// sure the broadcaster is caught up so we
				// don't miss anything.
				select {
				case t.broadcastSyncChan <- struct{}{}:
				case <-t.running:
				}
			case <-t.running:
				done = true
			}
		}
	}()

	return w
}

// Release is the inverse of Watch; it indicates that the watcher no longer
// wants to be notified about the progress of the transfer. All calls to
// Watch must be paired with later calls to Release so that the lifecycle of
// the transfer is properly managed.
func (t *transfer) Release(watcher *Watcher) {
	t.mu.Lock()
	delete(t.watchers, watcher.releaseChan)

	if len(t.watchers) == 0 {
		close(t.hasWatchers)
		t.cancel()
	}
	t.mu.Unlock()

	close(watcher.releaseChan)
	// Block until the watcher goroutine completes
	<-watcher.running
}

// Done returns a channel which is closed if the transfer completes or is
// cancelled. Note that having 0 watchers causes a transfer to be cancelled.
func (t *transfer) Done() <-chan struct{} {
	// Note that this doesn't return t.ctx.Done() because that channel will
	// be closed the moment Cancel is called, and we need to return a
	// channel that blocks until a cancellation is actually acknowledged by
	// the transfer function.
	return t.running
}

// Released returns a channel which is closed once all watchers release the
// transfer.
func (t *transfer) Released() <-chan struct{} {
	return t.hasWatchers
}

// Context returns the context associated with the transfer.
func (t *transfer) Context() context.Context {
	return t.ctx
}

// Cancel cancels the context associated with the transfer.
func (t *transfer) Cancel() {
	t.cancel()
}

// DoFunc is a function called by the transfer manager to actually perform
// a transfer. It should be non-blocking: the actual work should run in a
// goroutine that waits until the start channel is closed before moving any
// data. If the function closes inactive, that signals to the transfer
// manager that the job is no longer actively moving data - for example, it
// may be waiting for a dependent transfer to finish - and so should not
// take up a concurrency slot.
type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer
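
// Illustrative sketch of a typical DoFunc (simplified; the progress values
// shown are hypothetical). The goroutine waits for the start signal, emits
// progress, and closes progressChan when done, which in turn ends
// Broadcast:
//
//	xferFunc := func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
//		t := NewTransfer()
//		go func() {
//			defer close(progressChan)
//			select {
//			case <-start:
//			case <-t.Context().Done():
//				return
//			}
//			progressChan <- progress.Progress{ID: "layer", Action: "transferring"}
//			// ... move data, checking t.Context().Done() for cancellation ...
//		}()
//		return t
//	}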

// TransferManager is used by LayerDownloadManager and LayerUploadManager to
// schedule and deduplicate transfers. It is up to the TransferManager
// implementation to make the scheduling and concurrency decisions.
type TransferManager interface {
	// Transfer checks if a transfer with the given key is in progress. If
	// so, it attaches progressOutput to the existing transfer and returns
	// it. Otherwise, it calls xferFunc to initiate the transfer.
	Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher)
}
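
// Illustrative usage sketch: concurrent callers passing the same key share
// one underlying transfer, each with its own progress stream. xferFunc and
// progressOutput are assumed to be built as in the DoFunc sketch above; the
// key shown is hypothetical.
//
//	tm := NewTransferManager(5)
//	xfer, watcher := tm.Transfer("sha256:...", xferFunc, progressOutput)
//	<-xfer.Done()
//	xfer.Release(watcher)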

type transferManager struct {
	mu sync.Mutex

	concurrencyLimit int
	activeTransfers  int
	transfers        map[string]Transfer
	waitingTransfers []chan struct{}
}

// NewTransferManager returns a new TransferManager.
func NewTransferManager(concurrencyLimit int) TransferManager {
	return &transferManager{
		concurrencyLimit: concurrencyLimit,
		transfers:        make(map[string]Transfer),
	}
}

// Transfer checks if a transfer matching the given key is in progress. If
// not, it starts one by calling xferFunc. The caller supplies a
// progress.Output which receives updates from the transfer.
func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) {
	tm.mu.Lock()
	defer tm.mu.Unlock()

	if xfer, present := tm.transfers[key]; present {
		// Transfer is already in progress.
		watcher := xfer.Watch(progressOutput)
		return xfer, watcher
	}

	start := make(chan struct{})
	inactive := make(chan struct{})

	if tm.activeTransfers < tm.concurrencyLimit {
		close(start)
		tm.activeTransfers++
	} else {
		tm.waitingTransfers = append(tm.waitingTransfers, start)
	}

	masterProgressChan := make(chan progress.Progress)
	xfer := xferFunc(masterProgressChan, start, inactive)
	watcher := xfer.Watch(progressOutput)
	go xfer.Broadcast(masterProgressChan)
	tm.transfers[key] = xfer

	// When the transfer is finished, remove it from the map.
	go func() {
		for {
			select {
			case <-inactive:
				tm.mu.Lock()
				tm.inactivate(start)
				tm.mu.Unlock()
				inactive = nil
			case <-xfer.Done():
				tm.mu.Lock()
				if inactive != nil {
					tm.inactivate(start)
				}
				delete(tm.transfers, key)
				tm.mu.Unlock()
				return
			}
		}
	}()

	return xfer, watcher
}

func (tm *transferManager) inactivate(start chan struct{}) {
	// If the transfer was started, remove it from the activeTransfers
	// count.
	select {
	case <-start:
		// Start next transfer if any are waiting
		if len(tm.waitingTransfers) != 0 {
			close(tm.waitingTransfers[0])
			tm.waitingTransfers = tm.waitingTransfers[1:]
		} else {
			tm.activeTransfers--
		}
	default:
	}
}
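
// Note on slot accounting (illustrative commentary): the select above
// distinguishes transfers whose start channel was closed (they held a slot,
// so the slot is handed to the next waiter or activeTransfers is
// decremented) from transfers still queued in waitingTransfers, which never
// consumed a slot and need no accounting.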