github.com/uriddle/docker@v0.0.0-20210926094723-4072e6aeb013/distribution/xfer/transfer.go

     1  package xfer
     2  
     3  import (
     4  	"context"
     5  	"runtime"
     6  	"sync"
     7  
     8  	"github.com/docker/docker/pkg/progress"
     9  )
    10  
    11  // DoNotRetry is an error wrapper indicating that the error cannot be resolved
    12  // with a retry.
    13  type DoNotRetry struct {
    14  	Err error
    15  }
    16  
    17  // Error returns the stringified representation of the encapsulated error.
    18  func (e DoNotRetry) Error() string {
    19  	return e.Err.Error()
    20  }
    21  
    22  // Watcher is returned by Watch and can be passed to Release to stop watching.
    23  type Watcher struct {
    24  	// signalChan is used to signal to the watcher goroutine that
    25  	// new progress information is available, or that the transfer
    26  	// has finished.
    27  	signalChan chan struct{}
    28  	// releaseChan signals to the watcher goroutine that the watcher
    29  	// should be detached.
    30  	releaseChan chan struct{}
    31  	// running remains open as long as the watcher is watching the
    32  	// transfer. It gets closed if the transfer finishes or the
    33  	// watcher is detached.
    34  	running chan struct{}
    35  }
    36  
    37  // Transfer represents an in-progress transfer.
    38  type Transfer interface {
    39  	Watch(progressOutput progress.Output) *Watcher
    40  	Release(*Watcher)
    41  	Context() context.Context
    42  	Close()
    43  	Done() <-chan struct{}
    44  	Released() <-chan struct{}
    45  	Broadcast(masterProgressChan <-chan progress.Progress)
    46  }
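
// A minimal sketch of the intended Watch/Release pairing on a Transfer, where
// progressOutput stands in for whatever progress.Output the caller supplies:
//
//	w := xfer.Watch(progressOutput)
//	defer xfer.Release(w)
//	<-xfer.Done() // closed once the transfer completes or is cancelled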
    47  
    48  type transfer struct {
    49  	mu sync.Mutex
    50  
    51  	ctx    context.Context
    52  	cancel context.CancelFunc
    53  
    54  	// watchers keeps track of the goroutines monitoring progress output,
    55  	// indexed by the channels that release them.
    56  	watchers map[chan struct{}]*Watcher
    57  
    58  	// lastProgress is the most recently received progress event.
    59  	lastProgress progress.Progress
    60  	// hasLastProgress is true when lastProgress has been set.
    61  	hasLastProgress bool
    62  
    63  	// running remains open as long as the transfer is in progress.
    64  	running chan struct{}
    65  	// released stays open until all watchers release the transfer and
    66  	// the transfer is no longer tracked by the transfer manager.
    67  	released chan struct{}
    68  
    69  	// broadcastDone is true if the master progress channel has closed.
    70  	broadcastDone bool
    71  	// closed is true if Close has been called.
    72  	closed bool
    73  	// broadcastSyncChan allows watchers to "ping" the broadcasting
    74  	// goroutine and wait for it to deplete its input channel. This ensures
    75  	// a detaching watcher won't miss an event that was sent before it
    76  	// started detaching.
    77  	broadcastSyncChan chan struct{}
    78  }
    79  
    80  // NewTransfer creates a new transfer.
    81  func NewTransfer() Transfer {
    82  	t := &transfer{
    83  		watchers:          make(map[chan struct{}]*Watcher),
    84  		running:           make(chan struct{}),
    85  		released:          make(chan struct{}),
    86  		broadcastSyncChan: make(chan struct{}),
    87  	}
    88  
    89  	// This uses context.Background instead of a caller-supplied context
    90  	// so that a transfer won't be cancelled automatically if the client
    91  	// which requested it is ^C'd (there could be other viewers).
    92  	t.ctx, t.cancel = context.WithCancel(context.Background())
    93  
    94  	return t
    95  }
    96  
    97  // Broadcast copies the progress and error output to all viewers.
    98  func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) {
    99  	for {
   100  		var (
   101  			p  progress.Progress
   102  			ok bool
   103  		)
   104  		select {
   105  		case p, ok = <-masterProgressChan:
   106  		default:
   107  			// We've depleted the channel, so now we can handle
   108  			// reads on broadcastSyncChan to let detaching watchers
   109  			// know we're caught up.
   110  			select {
   111  			case <-t.broadcastSyncChan:
   112  				continue
   113  			case p, ok = <-masterProgressChan:
   114  			}
   115  		}
   116  
   117  		t.mu.Lock()
   118  		if ok {
   119  			t.lastProgress = p
   120  			t.hasLastProgress = true
   121  			for _, w := range t.watchers {
   122  				select {
   123  				case w.signalChan <- struct{}{}:
   124  				default:
   125  				}
   126  			}
   127  
   128  		} else {
   129  			t.broadcastDone = true
   130  		}
   131  		t.mu.Unlock()
   132  		if !ok {
   133  			close(t.running)
   134  			return
   135  		}
   136  	}
   137  }
   138  
   139  // Watch adds a watcher to the transfer. The supplied progress.Output receives
   140  // progress updates until the transfer finishes or the watcher is released.
   141  func (t *transfer) Watch(progressOutput progress.Output) *Watcher {
   142  	t.mu.Lock()
   143  	defer t.mu.Unlock()
   144  
   145  	w := &Watcher{
   146  		releaseChan: make(chan struct{}),
   147  		signalChan:  make(chan struct{}),
   148  		running:     make(chan struct{}),
   149  	}
   150  
   151  	t.watchers[w.releaseChan] = w
   152  
   153  	if t.broadcastDone {
   154  		close(w.running)
   155  		return w
   156  	}
   157  
   158  	go func() {
   159  		defer func() {
   160  			close(w.running)
   161  		}()
   162  		done := false
   163  		for {
   164  			t.mu.Lock()
   165  			hasLastProgress := t.hasLastProgress
   166  			lastProgress := t.lastProgress
   167  			t.mu.Unlock()
   168  
   169  			// This might write the last progress item a
   170  			// second time (since channel closure also gets
   171  			// us here), but that's fine.
   172  			if hasLastProgress {
   173  				progressOutput.WriteProgress(lastProgress)
   174  			}
   175  
   176  			if done {
   177  				return
   178  			}
   179  
   180  			select {
   181  			case <-w.signalChan:
   182  			case <-w.releaseChan:
   183  				done = true
   184  				// Since the watcher is going to detach, make
   185  				// sure the broadcaster is caught up so we
   186  				// don't miss anything.
   187  				select {
   188  				case t.broadcastSyncChan <- struct{}{}:
   189  				case <-t.running:
   190  				}
   191  			case <-t.running:
   192  				done = true
   193  			}
   194  		}
   195  	}()
   196  
   197  	return w
   198  }
   199  
   200  // Release is the inverse of Watch; it indicates that the watcher no longer wants
   201  // to be notified about the progress of the transfer. All calls to Watch must
   202  // be paired with later calls to Release so that the lifecycle of the transfer
   203  // is properly managed.
   204  func (t *transfer) Release(watcher *Watcher) {
   205  	t.mu.Lock()
   206  	delete(t.watchers, watcher.releaseChan)
   207  
   208  	if len(t.watchers) == 0 {
   209  		if t.closed {
   210  			// released may have been closed already if all
   211  			// watchers were released, then another one was added
   212  			// while waiting for a previous watcher goroutine to
   213  			// finish.
   214  			select {
   215  			case <-t.released:
   216  			default:
   217  				close(t.released)
   218  			}
   219  		} else {
   220  			t.cancel()
   221  		}
   222  	}
   223  	t.mu.Unlock()
   224  
   225  	close(watcher.releaseChan)
   226  	// Block until the watcher goroutine completes
   227  	<-watcher.running
   228  }
   229  
   230  // Done returns a channel which is closed if the transfer completes or is
   231  // cancelled. Note that having 0 watchers causes a transfer to be cancelled.
   232  func (t *transfer) Done() <-chan struct{} {
   233  	// Note that this doesn't return t.ctx.Done() because that channel will
   234  	// be closed the moment Cancel is called, and we need to return a
   235  	// channel that blocks until a cancellation is actually acknowledged by
   236  	// the transfer function.
   237  	return t.running
   238  }
   239  
   240  // Released returns a channel which is closed once all watchers release the
   241  // transfer AND the transfer is no longer tracked by the transfer manager.
   242  func (t *transfer) Released() <-chan struct{} {
   243  	return t.released
   244  }
   245  
   246  // Context returns the context associated with the transfer.
   247  func (t *transfer) Context() context.Context {
   248  	return t.ctx
   249  }
   250  
   251  // Close is called by the transfer manager when the transfer is no longer
   252  // being tracked.
   253  func (t *transfer) Close() {
   254  	t.mu.Lock()
   255  	t.closed = true
   256  	if len(t.watchers) == 0 {
   257  		close(t.released)
   258  	}
   259  	t.mu.Unlock()
   260  }
   261  
   262  // DoFunc is a function called by the transfer manager to actually perform
   263  // a transfer. It should be non-blocking. It should wait until the start channel
   264  // is closed before transferring any data. If the function closes inactive, that
   265  // signals to the transfer manager that the job is no longer actively moving
   266  // data - for example, it may be waiting for a dependent transfer to finish.
   267  // This prevents it from taking up a slot.
   268  type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer
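
// exampleDoFunc is a minimal sketch of a DoFunc following the contract above:
// it returns a Transfer immediately, waits for the start channel before doing
// any work, reports progress on progressChan, and closes progressChan when it
// is finished so that Broadcast closes the transfer's Done channel. The ID and
// progress values are placeholders for illustration.
func exampleDoFunc(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
	t := NewTransfer()
	go func() {
		// Closing the progress channel is what ends Broadcast and, with
		// it, the transfer.
		defer close(progressChan)
		select {
		case <-start:
		case <-t.Context().Done():
			return
		}
		for current := int64(0); current <= 100; current += 25 {
			select {
			case <-t.Context().Done():
				// Cancelled, e.g. because every watcher released
				// the transfer.
				return
			default:
			}
			progressChan <- progress.Progress{ID: "example", Current: current, Total: 100}
		}
	}()
	return t
}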
   269  
   270  // TransferManager is used by LayerDownloadManager and LayerUploadManager to
   271  // schedule and deduplicate transfers. It is up to the TransferManager
   272  // implementation to make the scheduling and concurrency decisions.
   273  type TransferManager interface {
   274  	// Transfer checks if a transfer with the given key is in progress. If
   275  	// so, it returns progress and error output from that transfer.
   276  	// Otherwise, it will call xferFunc to initiate the transfer.
   277  	Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher)
   278  }
   279  
   280  type transferManager struct {
   281  	mu sync.Mutex
   282  
   283  	concurrencyLimit int
   284  	activeTransfers  int
   285  	transfers        map[string]Transfer
   286  	waitingTransfers []chan struct{}
   287  }
   288  
   289  // NewTransferManager returns a new TransferManager.
   290  func NewTransferManager(concurrencyLimit int) TransferManager {
   291  	return &transferManager{
   292  		concurrencyLimit: concurrencyLimit,
   293  		transfers:        make(map[string]Transfer),
   294  	}
   295  }
   296  
   297  // Transfer checks if a transfer matching the given key is in progress. If not,
   298  // it starts one by calling xferFunc. The caller supplies a progress.Output
   299  // which receives progress updates from the transfer.
   300  func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) {
   301  	tm.mu.Lock()
   302  	defer tm.mu.Unlock()
   303  
   304  	for {
   305  		xfer, present := tm.transfers[key]
   306  		if !present {
   307  			break
   308  		}
   309  		// Transfer is already in progress.
   310  		watcher := xfer.Watch(progressOutput)
   311  
   312  		select {
   313  		case <-xfer.Context().Done():
   314  			// We don't want to watch a transfer that has been cancelled.
   315  			// Wait for it to be removed from the map and try again.
   316  			xfer.Release(watcher)
   317  			tm.mu.Unlock()
   318  			// The goroutine that removes this transfer from the
   319  			// map is also waiting for xfer.Done(), so yield to it.
   320  			// This could be avoided by adding a Closed method
   321  			// to Transfer to allow explicitly waiting for it to be
   322  // removed from the map, but forcing a scheduling round in
   323  			// this very rare case seems better than bloating the
   324  			// interface definition.
   325  			runtime.Gosched()
   326  			<-xfer.Done()
   327  			tm.mu.Lock()
   328  		default:
   329  			return xfer, watcher
   330  		}
   331  	}
   332  
   333  	start := make(chan struct{})
   334  	inactive := make(chan struct{})
   335  
   336  	if tm.activeTransfers < tm.concurrencyLimit {
   337  		close(start)
   338  		tm.activeTransfers++
   339  	} else {
   340  		tm.waitingTransfers = append(tm.waitingTransfers, start)
   341  	}
   342  
   343  	masterProgressChan := make(chan progress.Progress)
   344  	xfer := xferFunc(masterProgressChan, start, inactive)
   345  	watcher := xfer.Watch(progressOutput)
   346  	go xfer.Broadcast(masterProgressChan)
   347  	tm.transfers[key] = xfer
   348  
   349  // When the transfer is finished, remove it from the map.
   350  	go func() {
   351  		for {
   352  			select {
   353  			case <-inactive:
   354  				tm.mu.Lock()
   355  				tm.inactivate(start)
   356  				tm.mu.Unlock()
   357  				inactive = nil
   358  			case <-xfer.Done():
   359  				tm.mu.Lock()
   360  				if inactive != nil {
   361  					tm.inactivate(start)
   362  				}
   363  				delete(tm.transfers, key)
   364  				tm.mu.Unlock()
   365  				xfer.Close()
   366  				return
   367  			}
   368  		}
   369  	}()
   370  
   371  	return xfer, watcher
   372  }
   373  
   374  func (tm *transferManager) inactivate(start chan struct{}) {
   375  	// If the transfer was started, remove it from the activeTransfers
   376  	// count.
   377  	select {
   378  	case <-start:
   379  		// Start next transfer if any are waiting
   380  		if len(tm.waitingTransfers) != 0 {
   381  			close(tm.waitingTransfers[0])
   382  			tm.waitingTransfers = tm.waitingTransfers[1:]
   383  		} else {
   384  			tm.activeTransfers--
   385  		}
   386  	default:
   387  	}
   388  }
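
// exampleSharedTransfer is a rough usage sketch: two requests for the same key
// share a single underlying transfer, and each caller gets its own Watcher
// that must later be released. The key string is a placeholder, and
// progress.ChanOutput is assumed to wrap a channel as a progress.Output.
func exampleSharedTransfer() {
	tm := NewTransferManager(3)

	// In real use a goroutine would drain progressChan; the buffer is
	// enough for this sketch.
	progressChan := make(chan progress.Progress, 100)
	progressOutput := progress.ChanOutput(progressChan)

	// The first call starts the transfer; the second finds it already in
	// progress and only attaches another watcher.
	xfer1, watcher1 := tm.Transfer("example-key", exampleDoFunc, progressOutput)
	xfer2, watcher2 := tm.Transfer("example-key", exampleDoFunc, progressOutput)

	<-xfer1.Done()

	// Every call to Transfer (which calls Watch internally) must be paired
	// with a Release.
	xfer1.Release(watcher1)
	xfer2.Release(watcher2)
}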