github.com/reds/docker@v1.11.2-rc1/distribution/xfer/transfer.go

package xfer

import (
	"runtime"
	"sync"

	"github.com/docker/docker/pkg/progress"
	"golang.org/x/net/context"
)

// DoNotRetry is an error wrapper indicating that the error cannot be resolved
// with a retry.
type DoNotRetry struct {
	Err error
}

// Error returns the stringified representation of the encapsulated error.
func (e DoNotRetry) Error() string {
	return e.Err.Error()
}

// Watcher is returned by Watch and can be passed to Release to stop watching.
type Watcher struct {
	// signalChan is used to signal to the watcher goroutine that
	// new progress information is available, or that the transfer
	// has finished.
	signalChan chan struct{}
	// releaseChan signals to the watcher goroutine that the watcher
	// should be detached.
	releaseChan chan struct{}
	// running remains open as long as the watcher is watching the
	// transfer. It gets closed if the transfer finishes or the
	// watcher is detached.
	running chan struct{}
}

// Transfer represents an in-progress transfer.
type Transfer interface {
	Watch(progressOutput progress.Output) *Watcher
	Release(*Watcher)
	Context() context.Context
	Close()
	Done() <-chan struct{}
	Released() <-chan struct{}
	Broadcast(masterProgressChan <-chan progress.Progress)
}
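
// A minimal sketch of the intended Watch/Release pairing from a caller's
// point of view (illustrative only; out stands for whatever progress.Output
// the caller already has):
//
//	t := NewTransfer()
//	w := t.Watch(out) // start receiving progress updates on out
//	// ... wait for completion or decide to stop watching ...
//	t.Release(w) // detach; blocks until the watcher goroutine exits
//
// Every Watch must eventually be paired with a Release; once the last watcher
// is released, the transfer's context is cancelled.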

type transfer struct {
	mu sync.Mutex

	ctx    context.Context
	cancel context.CancelFunc

	// watchers keeps track of the goroutines monitoring progress output,
	// indexed by the channels that release them.
	watchers map[chan struct{}]*Watcher

	// lastProgress is the most recently received progress event.
	lastProgress progress.Progress
	// hasLastProgress is true when lastProgress has been set.
	hasLastProgress bool

	// running remains open as long as the transfer is in progress.
	running chan struct{}
	// released stays open until all watchers release the transfer and
	// the transfer is no longer tracked by the transfer manager.
	released chan struct{}

	// broadcastDone is true if the master progress channel has closed.
	broadcastDone bool
	// closed is true if Close has been called.
	closed bool
	// broadcastSyncChan allows watchers to "ping" the broadcasting
	// goroutine to wait for it to deplete its input channel. This ensures
	// a detaching watcher won't miss an event that was sent before it
	// started detaching.
	broadcastSyncChan chan struct{}
}

// NewTransfer creates a new transfer.
func NewTransfer() Transfer {
	t := &transfer{
		watchers:          make(map[chan struct{}]*Watcher),
		running:           make(chan struct{}),
		released:          make(chan struct{}),
		broadcastSyncChan: make(chan struct{}),
	}

	// This uses context.Background instead of a caller-supplied context
	// so that a transfer won't be cancelled automatically if the client
	// which requested it is ^C'd (there could be other viewers).
	t.ctx, t.cancel = context.WithCancel(context.Background())

	return t
}

// Broadcast copies the progress and error output to all viewers.
func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) {
	for {
		var (
			p  progress.Progress
			ok bool
		)
		select {
		case p, ok = <-masterProgressChan:
		default:
			// We've depleted the channel, so now we can handle
			// reads on broadcastSyncChan to let detaching watchers
			// know we're caught up.
			select {
			case <-t.broadcastSyncChan:
				continue
			case p, ok = <-masterProgressChan:
			}
		}

		t.mu.Lock()
		if ok {
			t.lastProgress = p
			t.hasLastProgress = true
			for _, w := range t.watchers {
				select {
				case w.signalChan <- struct{}{}:
				default:
				}
			}
		} else {
			t.broadcastDone = true
		}
		t.mu.Unlock()
		if !ok {
			close(t.running)
			return
		}
	}
}
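
// Illustrative sketch (not part of the original file) of how a transfer's
// worker feeds Broadcast: the worker sends on the same channel that the
// transfer manager passes to Broadcast, and closing that channel is what
// marks the transfer as done and closes Done().
//
//	progressChan := make(chan progress.Progress)
//	go t.Broadcast(progressChan)
//	progressChan <- progress.Progress{ID: "layer", Action: "Downloading"}
//	close(progressChan) // broadcastDone becomes true and t.running is closed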

// Watch adds a watcher to the transfer. The supplied progress.Output receives
// progress updates from the transfer until it finishes or the watcher is
// released.
func (t *transfer) Watch(progressOutput progress.Output) *Watcher {
	t.mu.Lock()
	defer t.mu.Unlock()

	w := &Watcher{
		releaseChan: make(chan struct{}),
		signalChan:  make(chan struct{}),
		running:     make(chan struct{}),
	}

	t.watchers[w.releaseChan] = w

	if t.broadcastDone {
		close(w.running)
		return w
	}

	go func() {
		defer func() {
			close(w.running)
		}()
		var (
			done           bool
			lastWritten    progress.Progress
			hasLastWritten bool
		)
		for {
			t.mu.Lock()
			hasLastProgress := t.hasLastProgress
			lastProgress := t.lastProgress
			t.mu.Unlock()

			// Make sure we don't write the last progress item
			// twice.
			if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) {
				progressOutput.WriteProgress(lastProgress)
				lastWritten = lastProgress
				hasLastWritten = true
			}

			if done {
				return
			}

			select {
			case <-w.signalChan:
			case <-w.releaseChan:
				done = true
				// Since the watcher is going to detach, make
				// sure the broadcaster is caught up so we
				// don't miss anything.
				select {
				case t.broadcastSyncChan <- struct{}{}:
				case <-t.running:
				}
			case <-t.running:
				done = true
			}
		}
	}()

	return w
}

// Release is the inverse of Watch: it indicates that the watcher no longer
// wants to be notified about the progress of the transfer. All calls to Watch
// must be paired with later calls to Release so that the lifecycle of the
// transfer is properly managed.
func (t *transfer) Release(watcher *Watcher) {
	t.mu.Lock()
	delete(t.watchers, watcher.releaseChan)

	if len(t.watchers) == 0 {
		if t.closed {
			// released may have been closed already if all
			// watchers were released, then another one was added
			// while waiting for a previous watcher goroutine to
			// finish.
			select {
			case <-t.released:
			default:
				close(t.released)
			}
		} else {
			t.cancel()
		}
	}
	t.mu.Unlock()

	close(watcher.releaseChan)
	// Block until the watcher goroutine completes
	<-watcher.running
}

// Done returns a channel which is closed if the transfer completes or is
// cancelled. Note that having 0 watchers causes a transfer to be cancelled.
func (t *transfer) Done() <-chan struct{} {
	// Note that this doesn't return t.ctx.Done() because that channel will
	// be closed the moment Cancel is called, and we need to return a
	// channel that blocks until a cancellation is actually acknowledged by
	// the transfer function.
	return t.running
}

// Released returns a channel which is closed once all watchers release the
// transfer AND the transfer is no longer tracked by the transfer manager.
func (t *transfer) Released() <-chan struct{} {
	return t.released
}

// Context returns the context associated with the transfer.
func (t *transfer) Context() context.Context {
	return t.ctx
}

// Close is called by the transfer manager when the transfer is no longer
// being tracked.
func (t *transfer) Close() {
	t.mu.Lock()
	t.closed = true
	if len(t.watchers) == 0 {
		close(t.released)
	}
	t.mu.Unlock()
}

// DoFunc is a function called by the transfer manager to actually perform
// a transfer. It should be non-blocking; the work it starts should wait until
// the start channel is closed before transferring any data. If the function
// closes inactive, that signals to the transfer manager that the job is no
// longer actively moving data, for example because it is waiting for a
// dependent transfer to finish. This prevents it from taking up a slot.
type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer
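
// A minimal DoFunc sketch (illustrative only, not part of the original file):
// it returns a Transfer immediately, waits for the start signal in a
// goroutine, reports one progress event, and closes the progress channel so
// that Broadcast finishes and Done() is closed.
//
//	var doNothing DoFunc = func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
//		t := NewTransfer()
//		go func() {
//			defer close(progressChan)
//			<-start // wait for a concurrency slot
//			progressChan <- progress.Progress{ID: "layer", Action: "Working"}
//		}()
//		return t
//	}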

// TransferManager is used by LayerDownloadManager and LayerUploadManager to
// schedule and deduplicate transfers. It is up to the TransferManager
// implementation to make the scheduling and concurrency decisions.
type TransferManager interface {
	// Transfer checks if a transfer with the given key is in progress. If
	// so, it returns progress and error output from that transfer.
	// Otherwise, it will call xferFunc to initiate the transfer.
	Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher)
}

type transferManager struct {
	mu sync.Mutex

	concurrencyLimit int
	activeTransfers  int
	transfers        map[string]Transfer
	waitingTransfers []chan struct{}
}

// NewTransferManager returns a new TransferManager.
func NewTransferManager(concurrencyLimit int) TransferManager {
	return &transferManager{
		concurrencyLimit: concurrencyLimit,
		transfers:        make(map[string]Transfer),
	}
}
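
// Illustrative usage sketch (not part of the original file; layerKey, doFunc
// and out are hypothetical stand-ins for a dedup key, a DoFunc and a
// progress.Output):
//
//	tm := NewTransferManager(3) // at most 3 transfers hold a slot at once
//	xfer, watcher := tm.Transfer(layerKey, doFunc, out)
//	// A concurrent call with the same key attaches a new watcher to the
//	// same transfer instead of starting a second one.
//	<-xfer.Done()
//	xfer.Release(watcher)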

// Transfer checks if a transfer matching the given key is in progress. If not,
// it starts one by calling xferFunc. The caller supplies a progress.Output
// which receives progress output from the transfer.
func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) {
	tm.mu.Lock()
	defer tm.mu.Unlock()

	for {
		xfer, present := tm.transfers[key]
		if !present {
			break
		}
		// Transfer is already in progress.
		watcher := xfer.Watch(progressOutput)

		select {
		case <-xfer.Context().Done():
			// We don't want to watch a transfer that has been cancelled.
			// Wait for it to be removed from the map and try again.
			xfer.Release(watcher)
			tm.mu.Unlock()
			// The goroutine that removes this transfer from the
			// map is also waiting for xfer.Done(), so yield to it.
			// This could be avoided by adding a Closed method
			// to Transfer to allow explicitly waiting for it to be
			// removed from the map, but forcing a scheduling round in
			// this very rare case seems better than bloating the
			// interface definition.
			runtime.Gosched()
			<-xfer.Done()
			tm.mu.Lock()
		default:
			return xfer, watcher
		}
	}

	start := make(chan struct{})
	inactive := make(chan struct{})

	if tm.activeTransfers < tm.concurrencyLimit {
		close(start)
		tm.activeTransfers++
	} else {
		tm.waitingTransfers = append(tm.waitingTransfers, start)
	}

	masterProgressChan := make(chan progress.Progress)
	xfer := xferFunc(masterProgressChan, start, inactive)
	watcher := xfer.Watch(progressOutput)
	go xfer.Broadcast(masterProgressChan)
	tm.transfers[key] = xfer

	// When the transfer is finished, remove from the map.
	go func() {
		for {
			select {
			case <-inactive:
				tm.mu.Lock()
				tm.inactivate(start)
				tm.mu.Unlock()
				inactive = nil
			case <-xfer.Done():
				tm.mu.Lock()
				if inactive != nil {
					tm.inactivate(start)
				}
				delete(tm.transfers, key)
				tm.mu.Unlock()
				xfer.Close()
				return
			}
		}
	}()

	return xfer, watcher
}

func (tm *transferManager) inactivate(start chan struct{}) {
	// If the transfer was started, remove it from the activeTransfers
	// count.
	select {
	case <-start:
		// Start next transfer if any are waiting
		if len(tm.waitingTransfers) != 0 {
			close(tm.waitingTransfers[0])
			tm.waitingTransfers = tm.waitingTransfers[1:]
		} else {
			tm.activeTransfers--
		}
	default:
	}
}
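
// Illustrative note (not part of the original file): a DoFunc that has to
// wait on something else can close its inactive channel to hand its
// concurrency slot to the next queued transfer, per the DoFunc contract
// above:
//
//	<-start          // slot acquired
//	close(inactive)  // release the slot while blocked on a dependency
//	<-dependencyDone // hypothetical dependency signal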