github.com/containerd/containerd@v22.0.0-20200918172823-438c87b8e050+incompatible/cmd/ctr/commands/content/fetch.go

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package content

import (
	"context"
	"fmt"
	"io"
	"os"
	"sync"
	"text/tabwriter"
	"time"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cmd/ctr/commands"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/pkg/progress"
	"github.com/containerd/containerd/platforms"
	"github.com/containerd/containerd/remotes"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/urfave/cli"
)

var fetchCommand = cli.Command{
	Name:      "fetch",
	Usage:     "fetch all content for an image into containerd",
	ArgsUsage: "[flags] <remote> <object>",
	Description: `Fetch an image into containerd.

This command ensures that containerd has all the necessary resources to build
an image's rootfs and convert the configuration to a runtime format supported
by containerd.

This command uses the same syntax of remote and object as 'ctr fetch-object'.
We may want to make this nicer, but agnosticism is preferred for the moment.

Right now, the split of responsibility between the daemon and the cli isn't
quite clear. Do not use this implementation as a guide. The end goal should be
to have metadata, content and snapshots ready for direct use via 'ctr run'.

Most of this is experimental and a few leaps are still needed to make it work.`,
	Flags: append(commands.RegistryFlags, commands.LabelFlag,
		cli.StringSliceFlag{
			Name:  "platform",
			Usage: "Pull content from a specific platform",
		},
		cli.BoolFlag{
			Name:  "all-platforms",
			Usage: "Pull content from all platforms",
		},
		cli.BoolFlag{
			Name:  "all-metadata",
			Usage: "Pull metadata for all platforms",
		},
		cli.BoolFlag{
			Name:  "metadata-only",
			Usage: "Pull all metadata including manifests and configs",
		},
	),
	Action: func(clicontext *cli.Context) error {
		ref := clicontext.Args().First()
		client, ctx, cancel, err := commands.NewClient(clicontext)
		if err != nil {
			return err
		}
		defer cancel()
		config, err := NewFetchConfig(ctx, clicontext)
		if err != nil {
			return err
		}

		_, err = Fetch(ctx, client, ref, config)
		return err
	},
}

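// Example invocations (illustrative sketch, not from the original file; it
// assumes this command is registered under 'ctr content' alongside the
// registry flags provided by commands.RegistryFlags):
//
//	ctr content fetch docker.io/library/alpine:latest
//	ctr content fetch --platform linux/arm64 docker.io/library/alpine:latest
//	ctr content fetch --all-platforms docker.io/library/alpine:latest
//	ctr content fetch --metadata-only docker.io/library/alpine:latest
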
// FetchConfig for content fetch
type FetchConfig struct {
	// Resolver used to resolve the remote reference and fetch its content
	Resolver remotes.Resolver
	// ProgressOutput to display progress
	ProgressOutput io.Writer
	// Labels to set on the content
	Labels []string
	// PlatformMatcher matches platforms, supersedes Platforms
	PlatformMatcher platforms.MatchComparer
	// Platforms to fetch
	Platforms []string
	// AllMetadata indicates whether to download all metadata
	AllMetadata bool
}

// NewFetchConfig returns the default FetchConfig from cli flags
func NewFetchConfig(ctx context.Context, clicontext *cli.Context) (*FetchConfig, error) {
	resolver, err := commands.GetResolver(ctx, clicontext)
	if err != nil {
		return nil, err
	}
	config := &FetchConfig{
		Resolver: resolver,
		Labels:   clicontext.StringSlice("label"),
	}
	if !clicontext.GlobalBool("debug") {
		// progress output is suppressed in debug mode so it does not hide
		// debug log lines
		config.ProgressOutput = os.Stdout
	}
	if !clicontext.Bool("all-platforms") {
		p := clicontext.StringSlice("platform")
		if len(p) == 0 {
			p = append(p, platforms.DefaultString())
		}
		config.Platforms = p
	}

	if clicontext.Bool("metadata-only") {
		config.AllMetadata = true
		// a matcher over an empty platform set matches nothing, so only
		// metadata is fetched
		config.PlatformMatcher = platforms.Any()
	} else if clicontext.Bool("all-metadata") {
		config.AllMetadata = true
	}

	return config, nil
}
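
// newDefaultFetchConfig is a minimal sketch (not part of the original file)
// of what NewFetchConfig produces for a plain invocation with no flags: the
// caller supplies a resolver, progress goes to stdout, and only the host's
// default platform is requested. The function name is hypothetical.
func newDefaultFetchConfig(resolver remotes.Resolver) *FetchConfig {
	return &FetchConfig{
		Resolver:       resolver,
		ProgressOutput: os.Stdout,
		Platforms:      []string{platforms.DefaultString()},
	}
}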

// Fetch loads all resources into the content store and returns the image
func Fetch(ctx context.Context, client *containerd.Client, ref string, config *FetchConfig) (images.Image, error) {
	ongoing := newJobs(ref)

	pctx, stopProgress := context.WithCancel(ctx)
	progress := make(chan struct{})

	go func() {
		if config.ProgressOutput != nil {
			// ProgressOutput is nil in debug mode: the progress bar would
			// hide some debug logs
			showProgress(pctx, ongoing, client.ContentStore(), config.ProgressOutput)
		}
		close(progress)
	}()

	h := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		if desc.MediaType != images.MediaTypeDockerSchema1Manifest {
			ongoing.add(desc)
		}
		return nil, nil
	})

	log.G(pctx).WithField("image", ref).Debug("fetching")
	labels := commands.LabelArgs(config.Labels)
	opts := []containerd.RemoteOpt{
		containerd.WithPullLabels(labels),
		containerd.WithResolver(config.Resolver),
		containerd.WithImageHandler(h),
		containerd.WithSchema1Conversion,
	}

	if config.AllMetadata {
		opts = append(opts, containerd.WithAllMetadata())
	}

	if config.PlatformMatcher != nil {
		opts = append(opts, containerd.WithPlatformMatcher(config.PlatformMatcher))
	} else {
		for _, platform := range config.Platforms {
			opts = append(opts, containerd.WithPlatform(platform))
		}
	}

	img, err := client.Fetch(pctx, ref, opts...)
	stopProgress()
	if err != nil {
		return images.Image{}, err
	}

	<-progress
	return img, nil
}
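
// fetchMetadataOnly is a hedged usage sketch (not part of the original file):
// it mirrors what the --metadata-only flag sets up, pulling manifests and
// configs but no layer content. The client, context, and resolver are assumed
// to be supplied by the caller; the function name is hypothetical.
func fetchMetadataOnly(ctx context.Context, client *containerd.Client, ref string, resolver remotes.Resolver) (images.Image, error) {
	config := &FetchConfig{
		Resolver:       resolver,
		ProgressOutput: os.Stdout,
		AllMetadata:    true,
		// a matcher over an empty platform set matches nothing, so only
		// metadata descriptors are followed during the pull walk
		PlatformMatcher: platforms.Any(),
	}
	return Fetch(ctx, client, ref, config)
}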

func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, out io.Writer) {
	var (
		ticker   = time.NewTicker(100 * time.Millisecond)
		fw       = progress.NewWriter(out)
		start    = time.Now()
		statuses = map[string]StatusInfo{}
		done     bool
	)
	defer ticker.Stop()

outer:
	for {
		select {
		case <-ticker.C:
			fw.Flush()

			tw := tabwriter.NewWriter(fw, 1, 8, 1, ' ', 0)

			resolved := "resolved"
			if !ongoing.isResolved() {
				resolved = "resolving"
			}
			statuses[ongoing.name] = StatusInfo{
				Ref:    ongoing.name,
				Status: resolved,
			}
			keys := []string{ongoing.name}

			activeSeen := map[string]struct{}{}
			if !done {
				active, err := cs.ListStatuses(ctx, "")
				if err != nil {
					log.G(ctx).WithError(err).Error("active check failed")
					continue
				}
				// update status of active entries!
				for _, active := range active {
					statuses[active.Ref] = StatusInfo{
						Ref:       active.Ref,
						Status:    "downloading",
						Offset:    active.Offset,
						Total:     active.Total,
						StartedAt: active.StartedAt,
						UpdatedAt: active.UpdatedAt,
					}
					activeSeen[active.Ref] = struct{}{}
				}
			}

			// now, update the items in jobs that are not in active
			for _, j := range ongoing.jobs() {
				key := remotes.MakeRefKey(ctx, j)
				keys = append(keys, key)
				if _, ok := activeSeen[key]; ok {
					continue
				}

				status, ok := statuses[key]
				if !done && (!ok || status.Status == "downloading") {
					info, err := cs.Info(ctx, j.Digest)
					if err != nil {
						if !errdefs.IsNotFound(err) {
							log.G(ctx).WithError(err).Errorf("failed to get content info")
							continue outer
						} else {
							statuses[key] = StatusInfo{
								Ref:    key,
								Status: "waiting",
							}
						}
					} else if info.CreatedAt.After(start) {
						statuses[key] = StatusInfo{
							Ref:       key,
							Status:    "done",
							Offset:    info.Size,
							Total:     info.Size,
							UpdatedAt: info.CreatedAt,
						}
					} else {
						statuses[key] = StatusInfo{
							Ref:    key,
							Status: "exists",
						}
					}
				} else if done {
					if ok {
						if status.Status != "done" && status.Status != "exists" {
							status.Status = "done"
							statuses[key] = status
						}
					} else {
						statuses[key] = StatusInfo{
							Ref:    key,
							Status: "done",
						}
					}
				}
			}

			var ordered []StatusInfo
			for _, key := range keys {
				ordered = append(ordered, statuses[key])
			}

			Display(tw, ordered, start)
			tw.Flush()

			if done {
				fw.Flush()
				return
			}
		case <-ctx.Done():
			done = true // allow ui to update once more
		}
	}
}

// jobs provides a way of identifying the download keys for a particular task
// encountered during the pull walk.
//
// This is very minimal and will probably be replaced with something more
// featured.
type jobs struct {
	name     string
	added    map[digest.Digest]struct{}
	descs    []ocispec.Descriptor
	mu       sync.Mutex
	resolved bool
}

func newJobs(name string) *jobs {
	return &jobs{
		name:  name,
		added: map[digest.Digest]struct{}{},
	}
}

func (j *jobs) add(desc ocispec.Descriptor) {
	j.mu.Lock()
	defer j.mu.Unlock()
	j.resolved = true

	if _, ok := j.added[desc.Digest]; ok {
		return
	}
	j.descs = append(j.descs, desc)
	j.added[desc.Digest] = struct{}{}
}

func (j *jobs) jobs() []ocispec.Descriptor {
	j.mu.Lock()
	defer j.mu.Unlock()

	var descs []ocispec.Descriptor
	return append(descs, j.descs...)
}

func (j *jobs) isResolved() bool {
	j.mu.Lock()
	defer j.mu.Unlock()
	return j.resolved
}

// StatusInfo holds the status info for an upload or download
type StatusInfo struct {
	Ref       string
	Status    string
	Offset    int64
	Total     int64
	StartedAt time.Time
	UpdatedAt time.Time
}

// Display pretty prints out the download or upload progress
func Display(w io.Writer, statuses []StatusInfo, start time.Time) {
	var total int64
	for _, status := range statuses {
		total += status.Offset
		switch status.Status {
		case "downloading", "uploading":
			var bar progress.Bar
			if status.Total > 0 {
				bar = progress.Bar(float64(status.Offset) / float64(status.Total))
			}
			fmt.Fprintf(w, "%s:\t%s\t%40r\t%8.8s/%s\t\n",
				status.Ref,
				status.Status,
				bar,
				progress.Bytes(status.Offset), progress.Bytes(status.Total))
		case "resolving", "waiting":
			bar := progress.Bar(0.0)
			fmt.Fprintf(w, "%s:\t%s\t%40r\t\n",
				status.Ref,
				status.Status,
				bar)
		default:
			bar := progress.Bar(1.0)
			fmt.Fprintf(w, "%s:\t%s\t%40r\t\n",
				status.Ref,
				status.Status,
				bar)
		}
	}

	fmt.Fprintf(w, "elapsed: %-4.1fs\ttotal: %7.6v\t(%v)\t\n",
		time.Since(start).Seconds(),
		// TODO(stevvooe): These calculations are actually way off.
		// Need to account for previously downloaded data. These
		// will basically be right for a first-time download but will be
		// skewed when restarting, since previously downloaded data is
		// counted against the new start time.
		progress.Bytes(total),
		progress.NewBytesPerSecond(total, time.Since(start)))
}
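
// displayExample is an illustrative sketch (not part of the original file):
// it renders a couple of fabricated StatusInfo entries through Display, the
// same way showProgress does on every tick. The refs and sizes are made up.
func displayExample(out io.Writer) {
	tw := tabwriter.NewWriter(out, 1, 8, 1, ' ', 0)
	start := time.Now().Add(-2 * time.Second)
	Display(tw, []StatusInfo{
		{Ref: "manifest-sha256:example", Status: "done", Offset: 1024, Total: 1024},
		{Ref: "layer-sha256:example", Status: "downloading", Offset: 512, Total: 2048},
	}, start)
	tw.Flush()
}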