github.com/vincentwoo/docker@v0.7.3-0.20160116130405-82401a4b13c0/distribution/xfer/transfer_test.go

package xfer

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/docker/docker/pkg/progress"
)

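// TestTransfer starts a few transfers below the concurrency limit and checks
// that each one begins immediately and that the watcher sees progress values
// that increase by one until they reach 10.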
func TestTransfer(t *testing.T) {
	makeXferFunc := func(id string) DoFunc {
		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
			select {
			case <-start:
			default:
				t.Fatalf("transfer function not started even though concurrency limit not reached")
			}

			xfer := NewTransfer()
			go func() {
				for i := 0; i <= 10; i++ {
					progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10}
					time.Sleep(10 * time.Millisecond)
				}
				close(progressChan)
			}()
			return xfer
		}
	}

	tm := NewTransferManager(5)
	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})
	receivedProgress := make(map[string]int64)

	go func() {
		for p := range progressChan {
			val, present := receivedProgress[p.ID]
			if !present {
				if p.Current != 0 {
					t.Fatalf("got unexpected progress value: %d (expected 0)", p.Current)
				}
			} else if p.Current == 10 {
				// Special case: last progress output may be
				// repeated because the transfer finishing
				// causes the latest progress output to be
				// written to the channel (in case the watcher
				// missed it).
				if val != 9 && val != 10 {
					t.Fatalf("got unexpected progress value: %d (expected %d)", p.Current, val+1)
				}
			} else if p.Current != val+1 {
				t.Fatalf("got unexpected progress value: %d (expected %d)", p.Current, val+1)
			}
			receivedProgress[p.ID] = p.Current
		}
		close(progressDone)
	}()

	// Start a few transfers
	ids := []string{"id1", "id2", "id3"}
	xfers := make([]Transfer, len(ids))
	watchers := make([]*Watcher, len(ids))
	for i, id := range ids {
		xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan))
	}

	for i, xfer := range xfers {
		<-xfer.Done()
		xfer.Release(watchers[i])
	}
	close(progressChan)
	<-progressDone

	for _, id := range ids {
		if receivedProgress[id] != 10 {
			t.Fatalf("final progress value %d instead of 10", receivedProgress[id])
		}
	}
}

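// TestConcurrencyLimit starts more transfers than the manager's concurrency
// limit allows and checks that the number of simultaneously running transfer
// functions never exceeds that limit.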
func TestConcurrencyLimit(t *testing.T) {
	concurrencyLimit := 3
	var runningJobs int32

	makeXferFunc := func(id string) DoFunc {
		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
			xfer := NewTransfer()
			go func() {
				<-start
				totalJobs := atomic.AddInt32(&runningJobs, 1)
				if int(totalJobs) > concurrencyLimit {
					t.Fatalf("too many jobs running")
				}
				for i := 0; i <= 10; i++ {
					progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10}
					time.Sleep(10 * time.Millisecond)
				}
				atomic.AddInt32(&runningJobs, -1)
				close(progressChan)
			}()
			return xfer
		}
	}

	tm := NewTransferManager(concurrencyLimit)
	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})
	receivedProgress := make(map[string]int64)

	go func() {
		for p := range progressChan {
			receivedProgress[p.ID] = p.Current
		}
		close(progressDone)
	}()

	// Start more transfers than the concurrency limit
	ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"}
	xfers := make([]Transfer, len(ids))
	watchers := make([]*Watcher, len(ids))
	for i, id := range ids {
		xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan))
	}

	for i, xfer := range xfers {
		<-xfer.Done()
		xfer.Release(watchers[i])
	}
	close(progressChan)
	<-progressDone

	for _, id := range ids {
		if receivedProgress[id] != 10 {
			t.Fatalf("final progress value %d instead of 10", receivedProgress[id])
		}
	}
}

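// TestInactiveJobs runs more transfers than the concurrency limit and has
// each transfer function close its inactive channel once its work is done,
// exercising the path where a transfer gives up its concurrency slot before
// its progress channel is closed.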
func TestInactiveJobs(t *testing.T) {
	concurrencyLimit := 3
	var runningJobs int32
	testDone := make(chan struct{})

	makeXferFunc := func(id string) DoFunc {
		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
			xfer := NewTransfer()
			go func() {
				<-start
				totalJobs := atomic.AddInt32(&runningJobs, 1)
				if int(totalJobs) > concurrencyLimit {
					t.Fatalf("too many jobs running")
				}
				for i := 0; i <= 10; i++ {
					progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10}
					time.Sleep(10 * time.Millisecond)
				}
				atomic.AddInt32(&runningJobs, -1)
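				// Signal that this transfer no longer needs a
				// concurrency slot, then hold the progress
				// channel open until the test allows it to
				// finish.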
				close(inactive)
				<-testDone
				close(progressChan)
			}()
			return xfer
		}
	}

	tm := NewTransferManager(concurrencyLimit)
	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})
	receivedProgress := make(map[string]int64)

	go func() {
		for p := range progressChan {
			receivedProgress[p.ID] = p.Current
		}
		close(progressDone)
	}()

	// Start more transfers than the concurrency limit
	ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"}
	xfers := make([]Transfer, len(ids))
	watchers := make([]*Watcher, len(ids))
	for i, id := range ids {
		xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan))
	}

	close(testDone)
	for i, xfer := range xfers {
		<-xfer.Done()
		xfer.Release(watchers[i])
	}
	close(progressChan)
	<-progressDone

	for _, id := range ids {
		if receivedProgress[id] != 10 {
			t.Fatalf("final progress value %d instead of 10", receivedProgress[id])
		}
	}
}

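// TestWatchRelease attaches several watchers to a single transfer and then
// releases them one at a time. Once the last watcher is released, the
// transfer should be cancelled, so Released() and Done() both yield closed
// channels.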
func TestWatchRelease(t *testing.T) {
	ready := make(chan struct{})

	makeXferFunc := func(id string) DoFunc {
		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
			xfer := NewTransfer()
			go func() {
				defer func() {
					close(progressChan)
				}()
				<-ready
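				// Emit progress every 10ms until the transfer's
				// context is cancelled, which happens once the
				// last watcher has been released.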
				for i := int64(0); ; i++ {
					select {
					case <-time.After(10 * time.Millisecond):
					case <-xfer.Context().Done():
						return
					}
					progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10}
				}
			}()
			return xfer
		}
	}

	tm := NewTransferManager(5)

	type watcherInfo struct {
		watcher               *Watcher
		progressChan          chan progress.Progress
		progressDone          chan struct{}
		receivedFirstProgress chan struct{}
	}

	progressConsumer := func(w watcherInfo) {
		first := true
		for range w.progressChan {
			if first {
				close(w.receivedFirstProgress)
			}
			first = false
		}
		close(w.progressDone)
	}

	// Start a transfer
	watchers := make([]watcherInfo, 5)
	var xfer Transfer
	watchers[0].progressChan = make(chan progress.Progress)
	watchers[0].progressDone = make(chan struct{})
	watchers[0].receivedFirstProgress = make(chan struct{})
	xfer, watchers[0].watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(watchers[0].progressChan))
	go progressConsumer(watchers[0])

	// Give it multiple watchers
	for i := 1; i != len(watchers); i++ {
		watchers[i].progressChan = make(chan progress.Progress)
		watchers[i].progressDone = make(chan struct{})
		watchers[i].receivedFirstProgress = make(chan struct{})
		watchers[i].watcher = xfer.Watch(progress.ChanOutput(watchers[i].progressChan))
		go progressConsumer(watchers[i])
	}

	// Now that the watchers are set up, allow the transfer goroutine to
	// proceed.
	close(ready)

	// Confirm that each watcher gets progress output.
	for _, w := range watchers {
		<-w.receivedFirstProgress
	}

	// Release one watcher every 5ms
	for _, w := range watchers {
		xfer.Release(w.watcher)
		<-time.After(5 * time.Millisecond)
	}

	// Now that all watchers have been released, Released() should
	// return a closed channel.
	<-xfer.Released()

	// Done() should return a closed channel because the xfer func returned
	// due to cancellation.
	<-xfer.Done()

	for _, w := range watchers {
		close(w.progressChan)
		<-w.progressDone
	}
}

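// TestDuplicateTransfer requests the same transfer ID several times and
// checks that the transfer function runs only once, with every caller
// attached to the single underlying transfer as a watcher.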
func TestDuplicateTransfer(t *testing.T) {
	ready := make(chan struct{})

	var xferFuncCalls int32

	makeXferFunc := func(id string) DoFunc {
		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
			atomic.AddInt32(&xferFuncCalls, 1)
			xfer := NewTransfer()
			go func() {
				defer func() {
					close(progressChan)
				}()
				<-ready
				for i := int64(0); ; i++ {
					select {
					case <-time.After(10 * time.Millisecond):
					case <-xfer.Context().Done():
						return
					}
					progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10}
				}
			}()
			return xfer
		}
	}

	tm := NewTransferManager(5)

	type transferInfo struct {
		xfer                  Transfer
		watcher               *Watcher
		progressChan          chan progress.Progress
		progressDone          chan struct{}
		receivedFirstProgress chan struct{}
	}

	progressConsumer := func(t transferInfo) {
		first := true
		for range t.progressChan {
			if first {
				close(t.receivedFirstProgress)
			}
			first = false
		}
		close(t.progressDone)
	}

	// Try to start multiple transfers with the same ID
	transfers := make([]transferInfo, 5)
	for i := range transfers {
		t := &transfers[i]
		t.progressChan = make(chan progress.Progress)
		t.progressDone = make(chan struct{})
		t.receivedFirstProgress = make(chan struct{})
		t.xfer, t.watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(t.progressChan))
		go progressConsumer(*t)
	}

	// Allow the transfer goroutine to proceed.
	close(ready)

	// Confirm that each watcher gets progress output.
	for _, t := range transfers {
		<-t.receivedFirstProgress
	}

	// Confirm that the transfer function was called exactly once.
	if atomic.LoadInt32(&xferFuncCalls) != 1 {
		t.Fatal("transfer function wasn't called exactly once")
	}

	// Release one watcher every 5ms
	for _, t := range transfers {
		t.xfer.Release(t.watcher)
		<-time.After(5 * time.Millisecond)
	}

	for _, t := range transfers {
		// Now that all watchers have been released, Released() should
		// return a closed channel.
		<-t.xfer.Released()
		// Done() should return a closed channel because the xfer func returned
		// due to cancellation.
		<-t.xfer.Done()
	}

	for _, t := range transfers {
		close(t.progressChan)
		<-t.progressDone
	}
}