github.com/kaisenlinux/docker.io@v0.0.0-20230510090727-ea55db55fac7/engine/distribution/xfer/transfer.go

package xfer // import "github.com/docker/docker/distribution/xfer"

import (
	"context"
	"runtime"
	"sync"

	"github.com/docker/docker/pkg/progress"
	"github.com/pkg/errors"
)

// DoNotRetry is an error wrapper indicating that the error cannot be resolved
// with a retry.
type DoNotRetry struct {
	Err error
}

// Error returns the stringified representation of the encapsulated error.
func (e DoNotRetry) Error() string {
	return e.Err.Error()
}

// IsDoNotRetryError returns true if the error is caused by a DoNotRetry error,
// and the transfer should not be retried.
func IsDoNotRetryError(err error) bool {
	var dnr DoNotRetry
	return errors.As(err, &dnr)
}

// Watcher is returned by Watch and can be passed to Release to stop watching.
type Watcher struct {
	// signalChan is used to signal to the watcher goroutine that
	// new progress information is available, or that the transfer
	// has finished.
	signalChan chan struct{}
	// releaseChan signals to the watcher goroutine that the watcher
	// should be detached.
	releaseChan chan struct{}
	// running remains open as long as the watcher is watching the
	// transfer. It gets closed if the transfer finishes or the
	// watcher is detached.
	running chan struct{}
}

// Transfer represents an in-progress transfer.
type Transfer interface {
	Watch(progressOutput progress.Output) *Watcher
	Release(*Watcher)
	Context() context.Context
	Close()
	Done() <-chan struct{}
	Released() <-chan struct{}
	Broadcast(mainProgressChan <-chan progress.Progress)
}

type transfer struct {
	mu sync.Mutex

	ctx    context.Context
	cancel context.CancelFunc

	// watchers keeps track of the goroutines monitoring progress output,
	// indexed by the channels that release them.
	watchers map[chan struct{}]*Watcher

	// lastProgress is the most recently received progress event.
	lastProgress progress.Progress
	// hasLastProgress is true when lastProgress has been set.
	hasLastProgress bool

	// running remains open as long as the transfer is in progress.
	running chan struct{}
	// released stays open until all watchers release the transfer and
	// the transfer is no longer tracked by the transfer manager.
	released chan struct{}

	// broadcastDone is true if the main progress channel has closed.
	broadcastDone bool
	// closed is true if Close has been called.
	closed bool
	// broadcastSyncChan allows watchers to "ping" the broadcasting
	// goroutine to wait for it to deplete its input channel. This ensures
	// a detaching watcher won't miss an event that was sent before it
	// started detaching.
	broadcastSyncChan chan struct{}
}

// NewTransfer creates a new transfer.
func NewTransfer() Transfer {
	t := &transfer{
		watchers:          make(map[chan struct{}]*Watcher),
		running:           make(chan struct{}),
		released:          make(chan struct{}),
		broadcastSyncChan: make(chan struct{}),
	}

	// This uses context.Background instead of a caller-supplied context
	// so that a transfer won't be cancelled automatically if the client
	// which requested it is ^C'd (there could be other viewers).
	t.ctx, t.cancel = context.WithCancel(context.Background())

	return t
}

// Broadcast copies the progress and error output to all viewers.
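// Only the most recent progress event is retained; watchers are signalled
// non-blockingly and re-read lastProgress themselves, so a slow watcher may
// coalesce intermediate updates rather than observe every event.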
func (t *transfer) Broadcast(mainProgressChan <-chan progress.Progress) {
	for {
		var (
			p  progress.Progress
			ok bool
		)
		select {
		case p, ok = <-mainProgressChan:
		default:
			// We've depleted the channel, so now we can handle
			// reads on broadcastSyncChan to let detaching watchers
			// know we're caught up.
			select {
			case <-t.broadcastSyncChan:
				continue
			case p, ok = <-mainProgressChan:
			}
		}

		t.mu.Lock()
		if ok {
			t.lastProgress = p
			t.hasLastProgress = true
			for _, w := range t.watchers {
				select {
				case w.signalChan <- struct{}{}:
				default:
				}
			}
		} else {
			t.broadcastDone = true
		}
		t.mu.Unlock()
		if !ok {
			close(t.running)
			return
		}
	}
}

// Watch adds a watcher to the transfer. The supplied progress.Output receives
// progress updates until the transfer finishes.
func (t *transfer) Watch(progressOutput progress.Output) *Watcher {
	t.mu.Lock()
	defer t.mu.Unlock()

	w := &Watcher{
		releaseChan: make(chan struct{}),
		signalChan:  make(chan struct{}),
		running:     make(chan struct{}),
	}

	t.watchers[w.releaseChan] = w

	if t.broadcastDone {
		close(w.running)
		return w
	}

	go func() {
		defer func() {
			close(w.running)
		}()
		var (
			done           bool
			lastWritten    progress.Progress
			hasLastWritten bool
		)
		for {
			t.mu.Lock()
			hasLastProgress := t.hasLastProgress
			lastProgress := t.lastProgress
			t.mu.Unlock()

			// Make sure we don't write the last progress item
			// twice.
			if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) {
				progressOutput.WriteProgress(lastProgress)
				lastWritten = lastProgress
				hasLastWritten = true
			}

			if done {
				return
			}

			select {
			case <-w.signalChan:
			case <-w.releaseChan:
				done = true
				// Since the watcher is going to detach, make
				// sure the broadcaster is caught up so we
				// don't miss anything.
				select {
				case t.broadcastSyncChan <- struct{}{}:
				case <-t.running:
				}
			case <-t.running:
				done = true
			}
		}
	}()

	return w
}

// Release is the inverse of Watch; it indicates that the watcher no longer
// wants to be notified about the progress of the transfer. All calls to Watch
// must be paired with later calls to Release so that the lifecycle of the
// transfer is properly managed.
func (t *transfer) Release(watcher *Watcher) {
	t.mu.Lock()
	delete(t.watchers, watcher.releaseChan)

	if len(t.watchers) == 0 {
		if t.closed {
			// released may have been closed already if all
			// watchers were released, then another one was added
			// while waiting for a previous watcher goroutine to
			// finish.
			select {
			case <-t.released:
			default:
				close(t.released)
			}
		} else {
			t.cancel()
		}
	}
	t.mu.Unlock()

	close(watcher.releaseChan)
	// Block until the watcher goroutine completes
	<-watcher.running
}

// Done returns a channel which is closed if the transfer completes or is
// cancelled. Note that having 0 watchers causes a transfer to be cancelled.
func (t *transfer) Done() <-chan struct{} {
	// Note that this doesn't return t.ctx.Done() because that channel will
	// be closed the moment Cancel is called, and we need to return a
	// channel that blocks until a cancellation is actually acknowledged by
	// the transfer function.
	return t.running
}

// Released returns a channel which is closed once all watchers release the
// transfer AND the transfer is no longer tracked by the transfer manager.
func (t *transfer) Released() <-chan struct{} {
	return t.released
}

// Context returns the context associated with the transfer.
func (t *transfer) Context() context.Context {
	return t.ctx
}

// Close is called by the transfer manager when the transfer is no longer
// being tracked.
func (t *transfer) Close() {
	t.mu.Lock()
	t.closed = true
	if len(t.watchers) == 0 {
		close(t.released)
	}
	t.mu.Unlock()
}

// DoFunc is a function called by the transfer manager to actually perform
// a transfer. It should be non-blocking. It should wait until the start channel
// is closed before transferring any data. If the function closes inactive, that
// signals to the transfer manager that the job is no longer actively moving
// data - for example, it may be waiting for a dependent transfer to finish.
// This prevents it from taking up a slot.
type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer

// TransferManager is used by LayerDownloadManager and LayerUploadManager to
// schedule and deduplicate transfers. It is up to the TransferManager
// implementation to make the scheduling and concurrency decisions.
type TransferManager interface {
	// Transfer checks if a transfer with the given key is in progress. If
	// so, it returns progress and error output from that transfer.
	// Otherwise, it will call xferFunc to initiate the transfer.
	Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher)
	// SetConcurrency sets the concurrencyLimit so that it can be adjusted
	// on daemon reload.
	SetConcurrency(concurrency int)
}

type transferManager struct {
	mu sync.Mutex

	concurrencyLimit int
	activeTransfers  int
	transfers        map[string]Transfer
	waitingTransfers []chan struct{}
}

// NewTransferManager returns a new TransferManager.
func NewTransferManager(concurrencyLimit int) TransferManager {
	return &transferManager{
		concurrencyLimit: concurrencyLimit,
		transfers:        make(map[string]Transfer),
	}
}

// SetConcurrency sets the concurrencyLimit.
func (tm *transferManager) SetConcurrency(concurrency int) {
	tm.mu.Lock()
	tm.concurrencyLimit = concurrency
	tm.mu.Unlock()
}

// Transfer checks if a transfer matching the given key is in progress. If not,
// it starts one by calling xferFunc. The caller-supplied progressOutput
// receives progress output from the transfer.
func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) {
	tm.mu.Lock()
	defer tm.mu.Unlock()

	for {
		xfer, present := tm.transfers[key]
		if !present {
			break
		}
		// Transfer is already in progress.
		watcher := xfer.Watch(progressOutput)

		select {
		case <-xfer.Context().Done():
			// We don't want to watch a transfer that has been cancelled.
			// Wait for it to be removed from the map and try again.
			xfer.Release(watcher)
			tm.mu.Unlock()
			// The goroutine that removes this transfer from the
			// map is also waiting for xfer.Done(), so yield to it.
			// This could be avoided by adding a Closed method
			// to Transfer to allow explicitly waiting for it to be
			// removed from the map, but forcing a scheduling round
			// in this very rare case seems better than bloating the
			// interface definition.
			runtime.Gosched()
			<-xfer.Done()
			tm.mu.Lock()
		default:
			return xfer, watcher
		}
	}

	start := make(chan struct{})
	inactive := make(chan struct{})

	if tm.concurrencyLimit == 0 || tm.activeTransfers < tm.concurrencyLimit {
		close(start)
		tm.activeTransfers++
	} else {
		tm.waitingTransfers = append(tm.waitingTransfers, start)
	}

	mainProgressChan := make(chan progress.Progress)
	xfer := xferFunc(mainProgressChan, start, inactive)
	watcher := xfer.Watch(progressOutput)
	go xfer.Broadcast(mainProgressChan)
	tm.transfers[key] = xfer

	// When the transfer is finished, remove it from the map.
	go func() {
		for {
			select {
			case <-inactive:
				tm.mu.Lock()
				tm.inactivate(start)
				tm.mu.Unlock()
				inactive = nil
			case <-xfer.Done():
				tm.mu.Lock()
				if inactive != nil {
					tm.inactivate(start)
				}
				delete(tm.transfers, key)
				tm.mu.Unlock()
				xfer.Close()
				return
			}
		}
	}()

	return xfer, watcher
}

func (tm *transferManager) inactivate(start chan struct{}) {
	// If the transfer was started, remove it from the activeTransfers
	// count.
	select {
	case <-start:
		// Start next transfer if any are waiting
		if len(tm.waitingTransfers) != 0 {
			close(tm.waitingTransfers[0])
			tm.waitingTransfers = tm.waitingTransfers[1:]
		} else {
			tm.activeTransfers--
		}
	default:
	}
}
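
// exampleTransferUsage is an illustrative sketch and not part of the original
// file: it shows, roughly, how a caller such as a layer download manager is
// expected to drive TransferManager. The function name, the key
// "example:layer", and the single progress event are hypothetical. A DoFunc
// must return its Transfer without blocking, wait for the start channel before
// moving any data, and close its progress channel when finished; the caller
// must pair the Watcher returned by Transfer with a later Release. The
// inactive channel is left unused here because this sketch never needs to
// give up its slot while waiting on a dependent transfer.
func exampleTransferUsage(tm TransferManager, progressOutput progress.Output) {
	doFunc := func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
		t := NewTransfer()
		go func() {
			// Closing progressChan terminates Broadcast, which in
			// turn closes the channel returned by Done().
			defer close(progressChan)
			select {
			case <-start:
				// A concurrency slot is available; the real data
				// transfer would happen here.
				progressChan <- progress.Progress{ID: "example:layer", Action: "Downloading"}
			case <-t.Context().Done():
				// Cancelled (for example, all watchers released)
				// before a slot opened up.
			}
		}()
		return t
	}

	xfer, watcher := tm.Transfer("example:layer", doFunc, progressOutput)
	<-xfer.Done()         // wait for completion or acknowledged cancellation
	xfer.Release(watcher) // every Watch/Transfer must be paired with a Release
}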