github.com/tonistiigi/docker@v0.10.1-0.20240229224939-974013b0dc6a/distribution/xfer/transfer.go

package xfer // import "github.com/docker/docker/distribution/xfer"

import (
	"context"
	"runtime"
	"sync"

	"github.com/docker/docker/pkg/progress"
	"github.com/pkg/errors"
)

// DoNotRetry is an error wrapper indicating that the error cannot be resolved
// with a retry.
type DoNotRetry struct {
	Err error
}

// Error returns the stringified representation of the encapsulated error.
func (e DoNotRetry) Error() string {
	return e.Err.Error()
}

// IsDoNotRetryError returns true if the error is caused by a DoNotRetry error,
// meaning the transfer should not be retried.
func IsDoNotRetryError(err error) bool {
	var dnr DoNotRetry
	return errors.As(err, &dnr)
}

// watcher is returned by watch and can be passed to release to stop watching.
type watcher struct {
	// signalChan is used to signal to the watcher goroutine that
	// new progress information is available, or that the transfer
	// has finished.
	signalChan chan struct{}
	// releaseChan signals to the watcher goroutine that the watcher
	// should be detached.
	releaseChan chan struct{}
	// running remains open as long as the watcher is watching the
	// transfer. It gets closed if the transfer finishes or the
	// watcher is detached.
	running chan struct{}
}

// transfer represents an in-progress transfer.
type transfer interface {
	watch(progressOutput progress.Output) *watcher
	release(*watcher)
	context() context.Context
	close()
	done() <-chan struct{}
	released() <-chan struct{}
	broadcast(mainProgressChan <-chan progress.Progress)
}

type xfer struct {
	mu sync.Mutex

	ctx    context.Context
	cancel context.CancelFunc

	// watchers keeps track of the goroutines monitoring progress output,
	// indexed by the channels that release them.
	watchers map[chan struct{}]*watcher

	// lastProgress is the most recently received progress event.
	lastProgress progress.Progress
	// hasLastProgress is true when lastProgress has been set.
	hasLastProgress bool

	// running remains open as long as the transfer is in progress.
	running chan struct{}
	// releasedChan stays open until all watchers release the transfer and
	// the transfer is no longer tracked by the transferManager.
	releasedChan chan struct{}

	// broadcastDone is true if the main progress channel has closed.
	broadcastDone bool
	// closed is true if close has been called.
	closed bool
	// broadcastSyncChan allows watchers to "ping" the broadcasting
	// goroutine and wait for it to deplete its input channel. This ensures
	// a detaching watcher won't miss an event that was sent before it
	// started detaching.
	broadcastSyncChan chan struct{}
}

// newTransfer creates a new transfer.
func newTransfer() transfer {
	t := &xfer{
		watchers:          make(map[chan struct{}]*watcher),
		running:           make(chan struct{}),
		releasedChan:      make(chan struct{}),
		broadcastSyncChan: make(chan struct{}),
	}

	// This uses context.Background instead of a caller-supplied context
	// so that a transfer won't be cancelled automatically if the client
	// which requested it is ^C'd (there could be other viewers).
	t.ctx, t.cancel = context.WithCancel(context.Background())

	return t
}

// broadcast copies the progress and error output to all viewers.
func (t *xfer) broadcast(mainProgressChan <-chan progress.Progress) {
	for {
		var (
			p  progress.Progress
			ok bool
		)
		select {
		case p, ok = <-mainProgressChan:
		default:
			// We've depleted the channel, so now we can handle
			// reads on broadcastSyncChan to let detaching watchers
			// know we're caught up.
			select {
			case <-t.broadcastSyncChan:
				continue
			case p, ok = <-mainProgressChan:
			}
		}

		t.mu.Lock()
		if ok {
			t.lastProgress = p
			t.hasLastProgress = true
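			// Wake each watcher without blocking; a watcher that
			// is busy writing will pick up lastProgress on its
			// next loop iteration.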
			for _, w := range t.watchers {
				select {
				case w.signalChan <- struct{}{}:
				default:
				}
			}
		} else {
			t.broadcastDone = true
		}
		t.mu.Unlock()
		if !ok {
			close(t.running)
			return
		}
	}
}

// watch adds a watcher to the transfer. The supplied progressOutput receives
// progress updates until the transfer finishes or the watcher is released.
func (t *xfer) watch(progressOutput progress.Output) *watcher {
	t.mu.Lock()
	defer t.mu.Unlock()

	w := &watcher{
		releaseChan: make(chan struct{}),
		signalChan:  make(chan struct{}),
		running:     make(chan struct{}),
	}

	t.watchers[w.releaseChan] = w

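	// If the broadcaster has already finished, there is nothing left to
	// watch; close the watcher's running channel right away so release
	// won't block on it.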
	if t.broadcastDone {
		close(w.running)
		return w
	}

	go func() {
		defer func() {
			close(w.running)
		}()
		var (
			done           bool
			lastWritten    progress.Progress
			hasLastWritten bool
		)
		for {
			t.mu.Lock()
			hasLastProgress := t.hasLastProgress
			lastProgress := t.lastProgress
			t.mu.Unlock()

			// Make sure we don't write the last progress item
			// twice.
			if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) {
				progressOutput.WriteProgress(lastProgress)
				lastWritten = lastProgress
				hasLastWritten = true
			}

			if done {
				return
			}

			select {
			case <-w.signalChan:
			case <-w.releaseChan:
				done = true
				// Since the watcher is going to detach, make
				// sure the broadcaster is caught up so we
				// don't miss anything.
				select {
				case t.broadcastSyncChan <- struct{}{}:
				case <-t.running:
				}
			case <-t.running:
				done = true
			}
		}
	}()

	return w
}

// release is the inverse of watch; it indicates that the watcher no longer
// wants to be notified about the progress of the transfer. All calls to watch
// must be paired with later calls to release so that the lifecycle of the
// transfer is properly managed.
func (t *xfer) release(watcher *watcher) {
	t.mu.Lock()
	delete(t.watchers, watcher.releaseChan)

	if len(t.watchers) == 0 {
		if t.closed {
			// releasedChan may have been closed already if all
			// watchers were released, then another one was added
			// while waiting for a previous watcher goroutine to
			// finish.
			select {
			case <-t.releasedChan:
			default:
				close(t.releasedChan)
			}
		} else {
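			// No watchers remain and the transferManager has not
			// closed the transfer yet, so cancel it.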
			t.cancel()
		}
	}
	t.mu.Unlock()

	close(watcher.releaseChan)
	// Block until the watcher goroutine completes.
	<-watcher.running
}

// done returns a channel which is closed if the transfer completes or is
// cancelled. Note that having 0 watchers causes a transfer to be cancelled.
func (t *xfer) done() <-chan struct{} {
	// Note that this doesn't return t.ctx.Done() because that channel will
	// be closed the moment cancel is called, and we need to return a
	// channel that blocks until a cancellation is actually acknowledged by
	// the transfer function.
	return t.running
}

// released returns a channel which is closed once all watchers release the
// transfer AND the transfer is no longer tracked by the transferManager.
func (t *xfer) released() <-chan struct{} {
	return t.releasedChan
}

// context returns the context associated with the transfer.
func (t *xfer) context() context.Context {
	return t.ctx
}

// close is called by the transferManager when the transfer is no longer
// being tracked.
func (t *xfer) close() {
	t.mu.Lock()
	t.closed = true
	if len(t.watchers) == 0 {
		close(t.releasedChan)
	}
	t.mu.Unlock()
}

// doFunc is a function called by the transferManager to actually perform
// a transfer. It should be non-blocking. It should wait until the start channel
// is closed before transferring any data. If the function closes inactive, that
// signals to the transferManager that the job is no longer actively moving
// data - for example, it may be waiting for a dependent transfer to finish.
// This prevents it from taking up a slot.
type doFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) transfer
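
// For illustration only, and not part of this package's API: a minimal sketch
// of what a doFunc implementation might look like. The function name and body
// below are hypothetical; the real implementations are provided by
// LayerDownloadManager and LayerUploadManager elsewhere in this package.
//
//	func sketchDoFunc(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) transfer {
//		t := newTransfer()
//		go func() {
//			// The doFunc owns progressChan and closes it when it is
//			// finished, which ends broadcast and closes done().
//			defer close(progressChan)
//			// Wait for a free concurrency slot before moving data.
//			<-start
//			select {
//			case <-t.context().Done():
//				// Cancelled, e.g. because the last watcher released it.
//				return
//			default:
//			}
//			progressChan <- progress.Progress{Action: "Transferring"}
//			// ... move the data here ...
//		}()
//		return t
//	}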

// transferManager is used by LayerDownloadManager and LayerUploadManager to
// schedule and deduplicate transfers. It is up to the transferManager
// to make the scheduling and concurrency decisions.
type transferManager struct {
	mu sync.Mutex

	concurrencyLimit int
	activeTransfers  int
	transfers        map[string]transfer
	waitingTransfers []chan struct{}
}

// newTransferManager returns a new transferManager.
func newTransferManager(concurrencyLimit int) *transferManager {
	return &transferManager{
		concurrencyLimit: concurrencyLimit,
		transfers:        make(map[string]transfer),
	}
}

// setConcurrency sets the concurrencyLimit.
func (tm *transferManager) setConcurrency(concurrency int) {
	tm.mu.Lock()
	tm.concurrencyLimit = concurrency
	tm.mu.Unlock()
}

// transfer checks if a transfer matching the given key is in progress. If not,
// it starts one by calling xferFunc. The caller supplies a progress.Output
// which receives progress output from the transfer.
func (tm *transferManager) transfer(key string, xferFunc doFunc, progressOutput progress.Output) (transfer, *watcher) {
	tm.mu.Lock()
	defer tm.mu.Unlock()

	for {
		xfer, present := tm.transfers[key]
		if !present {
			break
		}
		// transfer is already in progress.
		watcher := xfer.watch(progressOutput)

		select {
		case <-xfer.context().Done():
			// We don't want to watch a transfer that has been cancelled.
			// Wait for it to be removed from the map and try again.
			xfer.release(watcher)
			tm.mu.Unlock()
			// The goroutine that removes this transfer from the
			// map is also waiting for xfer.done(), so yield to it.
			// This could be avoided by adding a closed method
			// to transfer to allow explicitly waiting for it to be
			// removed from the map, but forcing a scheduling round
			// in this very rare case seems better than bloating the
			// interface definition.
			runtime.Gosched()
			<-xfer.done()
			tm.mu.Lock()
		default:
			return xfer, watcher
		}
	}

	start := make(chan struct{})
	inactive := make(chan struct{})

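	// Start the transfer right away if there is a free slot (a limit of 0
	// means unlimited); otherwise queue its start channel until a running
	// transfer gives up its slot.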
	if tm.concurrencyLimit == 0 || tm.activeTransfers < tm.concurrencyLimit {
		close(start)
		tm.activeTransfers++
	} else {
		tm.waitingTransfers = append(tm.waitingTransfers, start)
	}

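	// Create the transfer, attach the caller's watcher, and start the
	// goroutine that fans progress out to all watchers.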
	mainProgressChan := make(chan progress.Progress)
	xfer := xferFunc(mainProgressChan, start, inactive)
	watcher := xfer.watch(progressOutput)
	go xfer.broadcast(mainProgressChan)
	tm.transfers[key] = xfer

	// When the transfer is finished, remove it from the map.
	go func() {
		for {
			select {
			case <-inactive:
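				// The transfer reported that it is no longer
				// actively moving data; give up its slot but
				// keep waiting for it to finish.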
				tm.mu.Lock()
				tm.inactivate(start)
				tm.mu.Unlock()
				inactive = nil
			case <-xfer.done():
				tm.mu.Lock()
				if inactive != nil {
					tm.inactivate(start)
				}
				delete(tm.transfers, key)
				tm.mu.Unlock()
				xfer.close()
				return
			}
		}
	}()

	return xfer, watcher
}

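// For illustration only: a rough sketch of how a caller such as a layer
// download or upload manager might drive transfer and release. The key,
// xferFunc, progressOutput, and callerCtx names below are hypothetical.
//
//	t, w := tm.transfer(key, xferFunc, progressOutput)
//	defer t.release(w)
//	select {
//	case <-t.done():
//		// The transfer finished or was cancelled.
//	case <-callerCtx.Done():
//		// The caller gave up; releasing the last watcher cancels the
//		// transfer (see done above).
//	}
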
func (tm *transferManager) inactivate(start chan struct{}) {
	// If the transfer was started, remove it from the activeTransfers
	// count.
	select {
	case <-start:
		// Start next transfer if any are waiting.
		if len(tm.waitingTransfers) != 0 {
			close(tm.waitingTransfers[0])
			tm.waitingTransfers = tm.waitingTransfers[1:]
		} else {
			tm.activeTransfers--
		}
	default:
	}
}