github.com/docker/docker@v299999999.0.0-20200612211812-aaf470eca7b5+incompatible/distribution/xfer/upload.go

package xfer // import "github.com/docker/docker/distribution/xfer"

import (
	"context"
	"errors"
	"time"

	"github.com/docker/distribution"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/progress"
	"github.com/sirupsen/logrus"
)

const maxUploadAttempts = 5

// LayerUploadManager provides task management and progress reporting for
// uploads.
type LayerUploadManager struct {
	tm           TransferManager
	waitDuration time.Duration
}

// SetConcurrency sets the max concurrent uploads for each push
func (lum *LayerUploadManager) SetConcurrency(concurrency int) {
	lum.tm.SetConcurrency(concurrency)
}

// NewLayerUploadManager returns a new LayerUploadManager.
func NewLayerUploadManager(concurrencyLimit int, options ...func(*LayerUploadManager)) *LayerUploadManager {
	manager := LayerUploadManager{
		tm:           NewTransferManager(concurrencyLimit),
		waitDuration: time.Second,
	}
	for _, option := range options {
		option(&manager)
	}
	return &manager
}
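
// Not part of the original file: a sketch of a functional option of the kind
// NewLayerUploadManager's variadic options parameter is meant to accept. The
// option name is hypothetical; a caller could equally pass an inline closure.
// It overrides the pause used between upload retry attempts.
func withWaitDuration(d time.Duration) func(*LayerUploadManager) {
	return func(lum *LayerUploadManager) {
		lum.waitDuration = d
	}
}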

type uploadTransfer struct {
	Transfer

	remoteDescriptor distribution.Descriptor
	err              error
}

// An UploadDescriptor references a layer that may need to be uploaded.
type UploadDescriptor interface {
	// Key returns the key used to deduplicate uploads.
	Key() string
	// ID returns the ID for display purposes.
	ID() string
	// DiffID should return the DiffID for this layer.
	DiffID() layer.DiffID
	// Upload is called to perform the upload.
	Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error)
	// SetRemoteDescriptor provides the distribution.Descriptor that was
	// returned by Upload. This descriptor is not to be confused with
	// the UploadDescriptor interface, which is used internally to
	// identify layers that are being uploaded.
	SetRemoteDescriptor(descriptor distribution.Descriptor)
}
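
// Not part of the original file: a minimal, hypothetical implementation of
// UploadDescriptor, included only to show how the interface is satisfied. A
// real implementation would stream the layer to the remote registry inside
// Upload; this sketch just reports progress and returns a zero-value
// descriptor.
type sketchUploadDescriptor struct {
	key    string
	id     string
	diffID layer.DiffID
	remote distribution.Descriptor
}

func (d *sketchUploadDescriptor) Key() string          { return d.key }
func (d *sketchUploadDescriptor) ID() string           { return d.id }
func (d *sketchUploadDescriptor) DiffID() layer.DiffID { return d.diffID }

func (d *sketchUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
	progress.Update(progressOutput, d.ID(), "Pushing")
	return distribution.Descriptor{}, nil
}

// SetRemoteDescriptor records the descriptor returned by Upload; a real
// implementation typically feeds it into manifest construction.
func (d *sketchUploadDescriptor) SetRemoteDescriptor(desc distribution.Descriptor) {
	d.remote = desc
}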

// Upload is a blocking function which ensures the listed layers are present on
// the remote registry. It uses the string returned by the Key method to
// deduplicate uploads.
func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error {
	var (
		uploads          []*uploadTransfer
		dedupDescriptors = make(map[string]*uploadTransfer)
	)

	for _, descriptor := range layers {
		progress.Update(progressOutput, descriptor.ID(), "Preparing")

		key := descriptor.Key()
		if _, present := dedupDescriptors[key]; present {
			continue
		}

		xferFunc := lum.makeUploadFunc(descriptor)
		upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput)
		defer upload.Release(watcher)
		uploads = append(uploads, upload.(*uploadTransfer))
		dedupDescriptors[key] = upload.(*uploadTransfer)
	}

	for _, upload := range uploads {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-upload.Transfer.Done():
			if upload.err != nil {
				return upload.err
			}
		}
	}
	for _, l := range layers {
		l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor)
	}

	return nil
}
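
// Not part of the original file: a rough sketch of how a caller might drive
// Upload, assuming the hypothetical sketchUploadDescriptor type above and a
// hypothetical helper name. Progress is drained from a channel in a separate
// goroutine; the channel is closed only after Upload has returned, at which
// point no watcher writes to it anymore.
func sketchPushLayers(ctx context.Context, descriptors []UploadDescriptor) error {
	lum := NewLayerUploadManager(4) // allow up to four concurrent uploads

	progressChan := make(chan progress.Progress, 100)
	drained := make(chan struct{})
	go func() {
		// A real caller would render these updates; the sketch just logs them.
		for p := range progressChan {
			logrus.Debugf("%s: %s", p.ID, p.Action)
		}
		close(drained)
	}()

	err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan))
	close(progressChan)
	<-drained
	return err
}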

func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
		u := &uploadTransfer{
			Transfer: NewTransfer(),
		}

		go func() {
			defer func() {
				close(progressChan)
			}()

			progressOutput := progress.ChanOutput(progressChan)

			select {
			case <-start:
			default:
				progress.Update(progressOutput, descriptor.ID(), "Waiting")
				<-start
			}

			retries := 0
			for {
				remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput)
				if err == nil {
					u.remoteDescriptor = remoteDescriptor
					break
				}

				// If an error was returned because the context
				// was cancelled, we shouldn't retry.
				select {
				case <-u.Transfer.Context().Done():
					u.err = err
					return
				default:
				}

				retries++
				if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts {
					logrus.Errorf("Upload failed: %v", err)
					u.err = err
					return
				}

				logrus.Errorf("Upload failed, retrying: %v", err)
				// Back off before the next attempt: count down
				// retries*5 ticks of waitDuration (one second by
				// default), updating the progress message each tick.
				delay := retries * 5
				ticker := time.NewTicker(lum.waitDuration)

			selectLoop:
				for {
					// The map lookup pluralizes "second" when delay != 1.
					progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1])
					select {
					case <-ticker.C:
						delay--
						if delay == 0 {
							ticker.Stop()
							break selectLoop
						}
					case <-u.Transfer.Context().Done():
						ticker.Stop()
						u.err = errors.New("upload cancelled during retry delay")
						return
					}
				}
			}
		}()

		return u
	}
}
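
// Not part of the original file: an illustration of how an UploadDescriptor
// implementation opts out of the retry loop above. Returning an error wrapped
// in DoNotRetry (defined with the transfer machinery elsewhere in this
// package) makes makeUploadFunc fail immediately instead of retrying up to
// maxUploadAttempts times. The method and its receiver are hypothetical.
func (d *sketchUploadDescriptor) uploadRejected() (distribution.Descriptor, error) {
	return distribution.Descriptor{}, DoNotRetry{Err: errors.New("layer rejected by registry")}
}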