github.com/reds/docker@v1.11.2-rc1/distribution/xfer/upload_test.go

package xfer

import (
	"errors"
	"sync/atomic"
	"testing"
	"time"

	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/progress"
	"golang.org/x/net/context"
)

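// maxUploadConcurrency is the concurrency limit passed to NewLayerUploadManager
// and enforced by the mock upload descriptors below.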
const maxUploadConcurrency = 3

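// mockUploadDescriptor is a test implementation of UploadDescriptor. When
// currentUploads is non-nil it tracks the number of in-flight uploads, and a
// non-zero simulateRetries makes the first attempts fail to exercise retries.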
type mockUploadDescriptor struct {
	currentUploads  *int32
	diffID          layer.DiffID
	simulateRetries int
}

// Key returns the key used to deduplicate uploads.
func (u *mockUploadDescriptor) Key() string {
	return u.diffID.String()
}

// ID returns the ID for display purposes.
func (u *mockUploadDescriptor) ID() string {
	return u.diffID.String()
}

// DiffID should return the DiffID for this layer.
func (u *mockUploadDescriptor) DiffID() layer.DiffID {
	return u.diffID
}

// SetRemoteDescriptor is not used in the mock.
func (u *mockUploadDescriptor) SetRemoteDescriptor(remoteDescriptor distribution.Descriptor) {
}

// Upload is called to perform the upload.
func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
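	// Track this upload as in flight and fail if the test's concurrency
	// limit is ever exceeded.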
	if u.currentUploads != nil {
		defer atomic.AddInt32(u.currentUploads, -1)

		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
			return distribution.Descriptor{}, errors.New("concurrency limit exceeded")
		}
	}

	// Sleep a bit to simulate a time-consuming upload.
	for i := int64(0); i <= 10; i++ {
		select {
		case <-ctx.Done():
			return distribution.Descriptor{}, ctx.Err()
		case <-time.After(10 * time.Millisecond):
			progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10})
		}
	}

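	// Simulate a transient failure so the caller's retry logic is exercised.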
	if u.simulateRetries != 0 {
		u.simulateRetries--
		return distribution.Descriptor{}, errors.New("simulating retry")
	}

	return distribution.Descriptor{}, nil
}

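// uploadDescriptors returns a fixed set of mock layers, including a repeated
// DiffID and one descriptor that fails once before succeeding.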
func uploadDescriptors(currentUploads *int32) []UploadDescriptor {
	return []UploadDescriptor{
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:1515325234325236634634608943609283523908626098235490238423902343"), 0},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:6929356290463485374960346430698374523437683470934634534953453453"), 0},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:8159352387436803946235346346368745389534789534897538734598734987"), 1},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:4637863963478346897346987346987346789346789364879364897364987346"), 0},
	}
}

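// expectedDigests maps the mock layers' DiffIDs to their expected upload digests.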
var expectedDigests = map[layer.DiffID]digest.Digest{
	layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"): digest.Digest("sha256:c5095d6cf7ee42b7b064371dcc1dc3fb4af197f04d01a60009d484bd432724fc"),
	layer.DiffID("sha256:1515325234325236634634608943609283523908626098235490238423902343"): digest.Digest("sha256:968cbfe2ff5269ea1729b3804767a1f57ffbc442d3bc86f47edbf7e688a4f36e"),
	layer.DiffID("sha256:6929356290463485374960346430698374523437683470934634534953453453"): digest.Digest("sha256:8a5e56ab4b477a400470a7d5d4c1ca0c91235fd723ab19cc862636a06f3a735d"),
	layer.DiffID("sha256:8159352387436803946235346346368745389534789534897538734598734987"): digest.Digest("sha256:5e733e5cd3688512fc240bd5c178e72671c9915947d17bb8451750d827944cb2"),
	layer.DiffID("sha256:4637863963478346897346987346987346789346789364879364897364987346"): digest.Digest("sha256:ec4bb98d15e554a9f66c3ef9296cf46772c0ded3b1592bd8324d96e2f60f460c"),
}

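// TestSuccessfulUpload checks that uploading the mock descriptors completes
// without error while never exceeding maxUploadConcurrency concurrent uploads.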
func TestSuccessfulUpload(t *testing.T) {
	lum := NewLayerUploadManager(maxUploadConcurrency)

	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})
	receivedProgress := make(map[string]int64)

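	// Record the latest progress value reported for each layer until
	// progressChan is closed.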
	go func() {
		for p := range progressChan {
			receivedProgress[p.ID] = p.Current
		}
		close(progressDone)
	}()

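	// currentUploads counts in-flight mock uploads so the descriptors can
	// detect concurrency violations.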
	var currentUploads int32
	descriptors := uploadDescriptors(&currentUploads)

	err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan))
	if err != nil {
		t.Fatalf("upload error: %v", err)
	}

	close(progressChan)
	<-progressDone
}

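// TestCancelledUpload checks that cancelling the context aborts in-flight
// uploads and makes Upload return context.Canceled.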
func TestCancelledUpload(t *testing.T) {
	lum := NewLayerUploadManager(maxUploadConcurrency)

	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})

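	// Drain progress updates so the uploads are never blocked writing to the
	// channel.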
	go func() {
		for range progressChan {
		}
		close(progressDone)
	}()

	ctx, cancel := context.WithCancel(context.Background())

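	// Cancel the upload shortly after it starts.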
	go func() {
		<-time.After(time.Millisecond)
		cancel()
	}()

	descriptors := uploadDescriptors(nil)
	err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan))
	if err != context.Canceled {
		t.Fatal("expected upload to be cancelled")
	}

	close(progressChan)
	<-progressDone
}