github.com/containerd/nerdctl/v2@v2.0.0-beta.5.0.20240520001846-b5758f54fa28/pkg/imgutil/jobs/jobs.go (about)

     1  /*
     2     Copyright The containerd Authors.
     3  
     4     Licensed under the Apache License, Version 2.0 (the "License");
     5     you may not use this file except in compliance with the License.
     6     You may obtain a copy of the License at
     7  
     8         http://www.apache.org/licenses/LICENSE-2.0
     9  
    10     Unless required by applicable law or agreed to in writing, software
    11     distributed under the License is distributed on an "AS IS" BASIS,
    12     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13     See the License for the specific language governing permissions and
    14     limitations under the License.
    15  */
    16  
    17  package jobs
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"io"
    23  	"sync"
    24  	"text/tabwriter"
    25  	"time"
    26  
    27  	"github.com/containerd/containerd/content"
    28  	"github.com/containerd/containerd/errdefs"
    29  	"github.com/containerd/containerd/pkg/progress"
    30  	"github.com/containerd/containerd/remotes"
    31  	"github.com/containerd/log"
    32  	"github.com/opencontainers/go-digest"
    33  	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    34  )
    35  
// ShowProgress continuously updates the output with job progress
// by checking status in the content store.
//
// It redraws every 100ms: one row for the top-level resolve status of
// ongoing, then one row per tracked descriptor, rendered through a
// tabwriter on top of a progress writer. The loop only returns after
// ctx is cancelled AND one final frame has been flushed.
//
// From https://github.com/containerd/containerd/blob/v1.7.0-rc.2/cmd/ctr/commands/content/fetch.go#L219-L336
func ShowProgress(ctx context.Context, ongoing *Jobs, cs content.Store, out io.Writer) {
	var (
		ticker   = time.NewTicker(100 * time.Millisecond)
		fw       = progress.NewWriter(out)
		start    = time.Now()
		statuses = map[string]StatusInfo{}
		done     bool // set on ctx cancellation; allows one last redraw before returning
	)
	defer ticker.Stop()

outer:
	for {
		select {
		case <-ticker.C:
			fw.Flush()

			tw := tabwriter.NewWriter(fw, 1, 8, 1, ' ', 0)

			// First row: whether the top-level reference has been resolved yet.
			resolved := StatusResolved
			if !ongoing.IsResolved() {
				resolved = StatusResolving
			}
			statuses[ongoing.name] = StatusInfo{
				Ref:    ongoing.name,
				Status: resolved,
			}
			keys := []string{ongoing.name}

			// Refs with an in-flight transfer this frame; skipped in the
			// per-job loop below so the store status wins.
			activeSeen := map[string]struct{}{}
			if !done {
				active, err := cs.ListStatuses(ctx, "")
				if err != nil {
					log.G(ctx).WithError(err).Error("active check failed")
					continue
				}
				// update status of active entries!
				for _, active := range active {
					statuses[active.Ref] = StatusInfo{
						Ref:       active.Ref,
						Status:    StatusDownloading,
						Offset:    active.Offset,
						Total:     active.Total,
						StartedAt: active.StartedAt,
						UpdatedAt: active.UpdatedAt,
					}
					activeSeen[active.Ref] = struct{}{}
				}
			}

			// now, update the items in jobs that are not in active
			for _, j := range ongoing.Jobs() {
				key := remotes.MakeRefKey(ctx, j)
				keys = append(keys, key)
				if _, ok := activeSeen[key]; ok {
					continue
				}

				status, ok := statuses[key]
				if !done && (!ok || status.Status == StatusDownloading) {
					info, err := cs.Info(ctx, j.Digest)
					if err != nil {
						if !errdefs.IsNotFound(err) {
							log.G(ctx).WithError(err).Error("failed to get content info")
							continue outer
						}
						// Not in the store yet: transfer has not started.
						statuses[key] = StatusInfo{
							Ref:    key,
							Status: StatusWaiting,
						}
					} else if info.CreatedAt.After(start) {
						// Committed after we started: finished during this run.
						statuses[key] = StatusInfo{
							Ref:       key,
							Status:    StatusDone,
							Offset:    info.Size,
							Total:     info.Size,
							UpdatedAt: info.CreatedAt,
						}
					} else {
						// Committed before we started: content already existed.
						statuses[key] = StatusInfo{
							Ref:    key,
							Status: StatusExists,
						}
					}
				} else if done {
					// Final frame: mark everything not terminal as done.
					if ok {
						if status.Status != StatusDone && status.Status != StatusExists {
							status.Status = StatusDone
							statuses[key] = status
						}
					} else {
						statuses[key] = StatusInfo{
							Ref:    key,
							Status: StatusDone,
						}
					}
				}
			}

			// Render in stable order: resolve row first, then jobs in the
			// order they were added to ongoing.
			var ordered []StatusInfo
			for _, key := range keys {
				ordered = append(ordered, statuses[key])
			}

			Display(tw, ordered, start)
			tw.Flush()

			if done {
				fw.Flush()
				return
			}
		case <-ctx.Done():
			done = true // allow ui to update once more
		}
	}
}
   155  
// Jobs provides a way of identifying the download keys for a particular task
// encountered during the pull walk.
//
// This is very minimal and will probably be replaced with something more
// featured.
//
// From https://github.com/containerd/containerd/blob/v1.7.0-rc.2/cmd/ctr/commands/content/fetch.go#L338-L349
type Jobs struct {
	name     string                     // top-level reference, shown as the first progress row
	added    map[digest.Digest]struct{} // digests already tracked, for de-duplication
	descs    []ocispec.Descriptor       // tracked descriptors, in insertion order
	mu       sync.Mutex                 // guards added, descs and resolved
	resolved bool                       // true once Add has been called at least once
}
   170  
   171  // New creates a new instance of the job status tracker.
   172  // From https://github.com/containerd/containerd/blob/v1.7.0-rc.2/cmd/ctr/commands/content/fetch.go#L351-L357
   173  func New(name string) *Jobs {
   174  	return &Jobs{
   175  		name:  name,
   176  		added: map[digest.Digest]struct{}{},
   177  	}
   178  }
   179  
   180  // Add adds a descriptor to be tracked.
   181  // From https://github.com/containerd/containerd/blob/v1.7.0-rc.2/cmd/ctr/commands/content/fetch.go#L359-L370
   182  func (j *Jobs) Add(desc ocispec.Descriptor) {
   183  	j.mu.Lock()
   184  	defer j.mu.Unlock()
   185  	j.resolved = true
   186  
   187  	if _, ok := j.added[desc.Digest]; ok {
   188  		return
   189  	}
   190  	j.descs = append(j.descs, desc)
   191  	j.added[desc.Digest] = struct{}{}
   192  }
   193  
   194  // Jobs returns a list of all tracked descriptors.
   195  // From https://github.com/containerd/containerd/blob/v1.7.0-rc.2/cmd/ctr/commands/content/fetch.go#L372-L379
   196  func (j *Jobs) Jobs() []ocispec.Descriptor {
   197  	j.mu.Lock()
   198  	defer j.mu.Unlock()
   199  
   200  	var descs []ocispec.Descriptor
   201  	return append(descs, j.descs...)
   202  }
   203  
   204  // IsResolved checks whether a descriptor has been resolved.
   205  // From https://github.com/containerd/containerd/blob/v1.7.0-rc.2/cmd/ctr/commands/content/fetch.go#L381-L386
   206  func (j *Jobs) IsResolved() bool {
   207  	j.mu.Lock()
   208  	defer j.mu.Unlock()
   209  	return j.resolved
   210  }
   211  
// StatusInfoStatus describes status info for an upload or download.
// From https://github.com/containerd/containerd/blob/v1.7.0-rc.2/cmd/ctr/commands/content/fetch.go#L388-L400
type StatusInfoStatus string

const (
	StatusResolved    StatusInfoStatus = "resolved"    // top-level reference resolved
	StatusResolving   StatusInfoStatus = "resolving"   // top-level reference still resolving
	StatusWaiting     StatusInfoStatus = "waiting"     // job known but transfer not started
	StatusCommitting  StatusInfoStatus = "committing"  // content being committed to the store
	StatusDone        StatusInfoStatus = "done"        // transfer finished during this run
	StatusDownloading StatusInfoStatus = "downloading" // download in flight
	StatusUploading   StatusInfoStatus = "uploading"   // upload in flight
	StatusExists      StatusInfoStatus = "exists"      // content was already in the store
)
   226  
// StatusInfo holds the status info for an upload or download.
// From https://github.com/containerd/containerd/blob/v1.7.0-rc.2/cmd/ctr/commands/content/fetch.go#L402-L410
type StatusInfo struct {
	Ref       string           // content reference key identifying the job
	Status    StatusInfoStatus // current state of the transfer
	Offset    int64            // bytes transferred so far
	Total     int64            // total size in bytes (may be 0 if not yet known)
	StartedAt time.Time        // when the transfer started
	UpdatedAt time.Time        // last time the status changed
}
   237  
   238  // Display pretty prints out the download or upload progress.
   239  // From https://github.com/containerd/containerd/blob/v1.7.0-rc.2/cmd/ctr/commands/content/fetch.go#L412-L452
   240  func Display(w io.Writer, statuses []StatusInfo, start time.Time) {
   241  	var total int64
   242  	for _, status := range statuses {
   243  		total += status.Offset
   244  		switch status.Status {
   245  		case StatusDownloading, StatusUploading:
   246  			var bar progress.Bar
   247  			if status.Total > 0.0 {
   248  				bar = progress.Bar(float64(status.Offset) / float64(status.Total))
   249  			}
   250  			fmt.Fprintf(w, "%s:\t%s\t%40r\t%8.8s/%s\t\n",
   251  				status.Ref,
   252  				status.Status,
   253  				bar,
   254  				progress.Bytes(status.Offset), progress.Bytes(status.Total))
   255  		case StatusResolving, StatusWaiting:
   256  			bar := progress.Bar(0.0)
   257  			fmt.Fprintf(w, "%s:\t%s\t%40r\t\n",
   258  				status.Ref,
   259  				status.Status,
   260  				bar)
   261  		default:
   262  			bar := progress.Bar(1.0)
   263  			fmt.Fprintf(w, "%s:\t%s\t%40r\t\n",
   264  				status.Ref,
   265  				status.Status,
   266  				bar)
   267  		}
   268  	}
   269  
   270  	fmt.Fprintf(w, "elapsed: %-4.1fs\ttotal: %7.6v\t(%v)\t\n",
   271  		time.Since(start).Seconds(),
   272  		// TODO(stevvooe): These calculations are actually way off.
   273  		// Need to account for previously downloaded data. These
   274  		// will basically be right for a download the first time
   275  		// but will be skewed if restarting, as it includes the
   276  		// data into the start time before.
   277  		progress.Bytes(total),
   278  		progress.NewBytesPerSecond(total, time.Since(start)))
   279  }