github.com/demonoid81/moby@v0.0.0-20200517203328-62dd8e17c460/distribution/xfer/transfer.go

package xfer // import "github.com/demonoid81/moby/distribution/xfer"

import (
	"context"
	"runtime"
	"sync"

	"github.com/demonoid81/moby/pkg/progress"
)

// DoNotRetry is an error wrapper indicating that the error cannot be resolved
// with a retry.
type DoNotRetry struct {
	Err error
}

// Error returns the stringified representation of the encapsulated error.
func (e DoNotRetry) Error() string {
	return e.Err.Error()
}
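
// A minimal sketch of how a retry loop might honour DoNotRetry (the loop,
// maxAttempts, and doAttempt are hypothetical, not part of this package):
//
//	for attempt := 0; attempt < maxAttempts; attempt++ {
//		err := doAttempt()
//		if err == nil {
//			return nil
//		}
//		if dnr, ok := err.(DoNotRetry); ok {
//			return dnr.Err // unrecoverable: stop retrying immediately
//		}
//	}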

// Watcher is returned by Watch and can be passed to Release to stop watching.
type Watcher struct {
	// signalChan is used to signal to the watcher goroutine that
	// new progress information is available, or that the transfer
	// has finished.
	signalChan chan struct{}
	// releaseChan signals to the watcher goroutine that the watcher
	// should be detached.
	releaseChan chan struct{}
	// running remains open as long as the watcher is watching the
	// transfer. It gets closed if the transfer finishes or the
	// watcher is detached.
	running chan struct{}
}

// Transfer represents an in-progress transfer.
type Transfer interface {
	Watch(progressOutput progress.Output) *Watcher
	Release(*Watcher)
	Context() context.Context
	Close()
	Done() <-chan struct{}
	Released() <-chan struct{}
	Broadcast(masterProgressChan <-chan progress.Progress)
}
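
// A hedged sketch of the watcher lifecycle from a consumer's point of view
// (xfer, ctx, and progressOutput are assumed to come from the caller):
//
//	watcher := xfer.Watch(progressOutput) // start receiving progress
//	select {
//	case <-xfer.Done():
//		// the transfer function finished or acknowledged cancellation
//	case <-ctx.Done(): // hypothetical caller context
//	}
//	xfer.Release(watcher) // always pair Watch with Release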

type transfer struct {
	mu sync.Mutex

	ctx    context.Context
	cancel context.CancelFunc

	// watchers keeps track of the goroutines monitoring progress output,
	// indexed by the channels that release them.
	watchers map[chan struct{}]*Watcher

	// lastProgress is the most recently received progress event.
	lastProgress progress.Progress
	// hasLastProgress is true when lastProgress has been set.
	hasLastProgress bool

	// running remains open as long as the transfer is in progress.
	running chan struct{}
	// released stays open until all watchers release the transfer and
	// the transfer is no longer tracked by the transfer manager.
	released chan struct{}

	// broadcastDone is true if the master progress channel has closed.
	broadcastDone bool
	// closed is true if Close has been called.
	closed bool
	// broadcastSyncChan allows watchers to "ping" the broadcasting
	// goroutine to wait for it to deplete its input channel. This ensures
	// a detaching watcher won't miss an event that was sent before it
	// started detaching.
	broadcastSyncChan chan struct{}
}

// NewTransfer creates a new transfer.
func NewTransfer() Transfer {
	t := &transfer{
		watchers:          make(map[chan struct{}]*Watcher),
		running:           make(chan struct{}),
		released:          make(chan struct{}),
		broadcastSyncChan: make(chan struct{}),
	}

	// This uses context.Background instead of a caller-supplied context
	// so that a transfer won't be cancelled automatically if the client
	// which requested it is ^C'd (there could be other viewers).
	t.ctx, t.cancel = context.WithCancel(context.Background())

	return t
}
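
// A minimal wiring sketch, assuming the caller drives Broadcast itself
// rather than going through a TransferManager (progressOutput is any
// progress.Output implementation supplied by the caller):
//
//	t := NewTransfer()
//	progressChan := make(chan progress.Progress)
//	go t.Broadcast(progressChan) // Done() closes once progressChan closes
//
//	w := t.Watch(progressOutput)
//	progressChan <- progress.Progress{ID: "layer", Action: "Downloading"}
//	close(progressChan) // finish the transfer
//	t.Release(w)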

// Broadcast copies the progress and error output to all viewers.
func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) {
	for {
		var (
			p  progress.Progress
			ok bool
		)
		select {
		case p, ok = <-masterProgressChan:
		default:
			// We've depleted the channel, so now we can handle
			// reads on broadcastSyncChan to let detaching watchers
			// know we're caught up.
			select {
			case <-t.broadcastSyncChan:
				continue
			case p, ok = <-masterProgressChan:
			}
		}

		t.mu.Lock()
		if ok {
			t.lastProgress = p
			t.hasLastProgress = true
			for _, w := range t.watchers {
				select {
				case w.signalChan <- struct{}{}:
				default:
				}
			}
		} else {
			t.broadcastDone = true
		}
		t.mu.Unlock()
		if !ok {
			close(t.running)
			return
		}
	}
}

// Watch adds a watcher to the transfer. The supplied progress.Output receives
// progress updates until the transfer finishes or the watcher is released.
func (t *transfer) Watch(progressOutput progress.Output) *Watcher {
	t.mu.Lock()
	defer t.mu.Unlock()

	w := &Watcher{
		releaseChan: make(chan struct{}),
		signalChan:  make(chan struct{}),
		running:     make(chan struct{}),
	}

	t.watchers[w.releaseChan] = w

	if t.broadcastDone {
		close(w.running)
		return w
	}

	go func() {
		defer func() {
			close(w.running)
		}()
		var (
			done           bool
			lastWritten    progress.Progress
			hasLastWritten bool
		)
		for {
			t.mu.Lock()
			hasLastProgress := t.hasLastProgress
			lastProgress := t.lastProgress
			t.mu.Unlock()

			// Make sure we don't write the last progress item
			// twice.
			if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) {
				progressOutput.WriteProgress(lastProgress)
				lastWritten = lastProgress
				hasLastWritten = true
			}

			if done {
				return
			}

			select {
			case <-w.signalChan:
			case <-w.releaseChan:
				done = true
				// Since the watcher is going to detach, make
				// sure the broadcaster is caught up so we
				// don't miss anything.
				select {
				case t.broadcastSyncChan <- struct{}{}:
				case <-t.running:
				}
			case <-t.running:
				done = true
			}
		}
	}()

	return w
}

// Release is the inverse of Watch: it indicates that the watcher no longer
// wants to be notified about the progress of the transfer. All calls to Watch
// must be paired with later calls to Release so that the lifecycle of the
// transfer is properly managed.
func (t *transfer) Release(watcher *Watcher) {
	t.mu.Lock()
	delete(t.watchers, watcher.releaseChan)

	if len(t.watchers) == 0 {
		if t.closed {
			// released may have been closed already if all
			// watchers were released, then another one was added
			// while waiting for a previous watcher goroutine to
			// finish.
			select {
			case <-t.released:
			default:
				close(t.released)
			}
		} else {
			t.cancel()
		}
	}
	t.mu.Unlock()

	close(watcher.releaseChan)
	// Block until the watcher goroutine completes.
	<-watcher.running
}

// Done returns a channel which is closed if the transfer completes or is
// cancelled. Note that having 0 watchers causes a transfer to be cancelled.
func (t *transfer) Done() <-chan struct{} {
	// Note that this doesn't return t.ctx.Done() because that channel will
	// be closed the moment Cancel is called, and we need to return a
	// channel that blocks until a cancellation is actually acknowledged by
	// the transfer function.
	return t.running
}
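
// A hedged cancellation sketch, assuming the transfer function honours its
// context: releasing the last watcher cancels the transfer's context, but
// Done() only closes once the transfer function notices the cancellation and
// shuts down its progress channel:
//
//	xfer.Release(watcher) // last watcher released: context is cancelled
//	<-xfer.Done()         // wait for the cancellation to be acknowledged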

// Released returns a channel which is closed once all watchers release the
// transfer AND the transfer is no longer tracked by the transfer manager.
func (t *transfer) Released() <-chan struct{} {
	return t.released
}

// Context returns the context associated with the transfer.
func (t *transfer) Context() context.Context {
	return t.ctx
}

// Close is called by the transfer manager when the transfer is no longer
// being tracked.
func (t *transfer) Close() {
	t.mu.Lock()
	t.closed = true
	if len(t.watchers) == 0 {
		close(t.released)
	}
	t.mu.Unlock()
}

// DoFunc is a function called by the transfer manager to actually perform
// a transfer. It should be non-blocking: the transfer work itself runs
// asynchronously, alongside the returned Transfer, and should wait until the
// start channel is closed before moving any data. If the function closes
// inactive, that signals to the transfer manager that the job is no longer
// actively moving data - for example, it may be waiting for a dependent
// transfer to finish. This prevents it from taking up a concurrency slot.
type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer
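
// A minimal DoFunc sketch under stated assumptions: the real implementations
// live in the layer download/upload managers, the "move data" body here is
// purely illustrative, and progress.ChanOutput is used on the assumption
// that it wraps a progress channel as a progress.Output:
//
//	doFunc := func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
//		t := NewTransfer()
//		go func() {
//			// Closing progressChan ends Broadcast, which closes Done().
//			defer close(progressChan)
//			select {
//			case <-start: // wait for a concurrency slot
//			case <-t.Context().Done():
//				return // cancelled before we ever started
//			}
//			out := progress.ChanOutput(progressChan)
//			progress.Update(out, "layer-id", "Transferring") // hypothetical ID
//			// ... move data here, checking t.Context() for cancellation ...
//		}()
//		return t
//	}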

// TransferManager is used by LayerDownloadManager and LayerUploadManager to
// schedule and deduplicate transfers. It is up to the TransferManager
// implementation to make the scheduling and concurrency decisions.
type TransferManager interface {
	// Transfer checks if a transfer with the given key is in progress. If
	// so, it returns progress and error output from that transfer.
	// Otherwise, it will call xferFunc to initiate the transfer.
	Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher)
	// SetConcurrency sets the concurrencyLimit so that it can be adjusted
	// on daemon reload.
	SetConcurrency(concurrency int)
}

type transferManager struct {
	mu sync.Mutex

	concurrencyLimit int
	activeTransfers  int
	transfers        map[string]Transfer
	waitingTransfers []chan struct{}
}

// NewTransferManager returns a new TransferManager.
func NewTransferManager(concurrencyLimit int) TransferManager {
	return &transferManager{
		concurrencyLimit: concurrencyLimit,
		transfers:        make(map[string]Transfer),
	}
}
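
// A hedged usage sketch (key, doFunc, and progressOutput are assumed to be
// supplied by the caller; a zero concurrency limit means "unlimited"):
//
//	tm := NewTransferManager(3) // at most three transfers move data at once
//	xfer, watcher := tm.Transfer("sha256:abcd...", doFunc, progressOutput)
//	<-xfer.Done()         // wait for the transfer to complete or cancel
//	xfer.Release(watcher) // detach; the last release cancels a live transfer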

// SetConcurrency sets the concurrencyLimit.
func (tm *transferManager) SetConcurrency(concurrency int) {
	tm.mu.Lock()
	tm.concurrencyLimit = concurrency
	tm.mu.Unlock()
}

// Transfer checks if a transfer matching the given key is in progress. If not,
// it starts one by calling xferFunc. The caller supplies a progress.Output
// which receives progress updates from the transfer.
func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) {
	tm.mu.Lock()
	defer tm.mu.Unlock()

	for {
		xfer, present := tm.transfers[key]
		if !present {
			break
		}
		// Transfer is already in progress.
		watcher := xfer.Watch(progressOutput)

		select {
		case <-xfer.Context().Done():
			// We don't want to watch a transfer that has been cancelled.
			// Wait for it to be removed from the map and try again.
			xfer.Release(watcher)
			tm.mu.Unlock()
			// The goroutine that removes this transfer from the
			// map is also waiting for xfer.Done(), so yield to it.
			// This could be avoided by adding a Closed method
			// to Transfer to allow explicitly waiting for it to be
			// removed from the map, but forcing a scheduling round
			// in this very rare case seems better than bloating the
			// interface definition.
			runtime.Gosched()
			<-xfer.Done()
			tm.mu.Lock()
		default:
			return xfer, watcher
		}
	}

	start := make(chan struct{})
	inactive := make(chan struct{})

	if tm.concurrencyLimit == 0 || tm.activeTransfers < tm.concurrencyLimit {
		close(start)
		tm.activeTransfers++
	} else {
		tm.waitingTransfers = append(tm.waitingTransfers, start)
	}

	masterProgressChan := make(chan progress.Progress)
	xfer := xferFunc(masterProgressChan, start, inactive)
	watcher := xfer.Watch(progressOutput)
	go xfer.Broadcast(masterProgressChan)
	tm.transfers[key] = xfer

	// When the transfer is finished, remove it from the map.
	go func() {
		for {
			select {
			case <-inactive:
				tm.mu.Lock()
				tm.inactivate(start)
				tm.mu.Unlock()
				inactive = nil
			case <-xfer.Done():
				tm.mu.Lock()
				if inactive != nil {
					tm.inactivate(start)
				}
				delete(tm.transfers, key)
				tm.mu.Unlock()
				xfer.Close()
				return
			}
		}
	}()

	return xfer, watcher
}

func (tm *transferManager) inactivate(start chan struct{}) {
	// If the transfer was started, remove it from the activeTransfers
	// count.
	select {
	case <-start:
		// Start the next transfer if any are waiting.
		if len(tm.waitingTransfers) != 0 {
			close(tm.waitingTransfers[0])
			tm.waitingTransfers = tm.waitingTransfers[1:]
		} else {
			tm.activeTransfers--
		}
	default:
	}
}