github.com/rumpl/bof@v23.0.0-rc.2+incompatible/plugin/backend_linux.go

     1  package plugin // import "github.com/docker/docker/plugin"
     2  
     3  import (
     4  	"archive/tar"
     5  	"bytes"
     6  	"compress/gzip"
     7  	"context"
     8  	"encoding/json"
     9  	"io"
    10  	"net/http"
    11  	"os"
    12  	"path"
    13  	"path/filepath"
    14  	"strings"
    15  	"time"
    16  
    17  	"github.com/containerd/containerd/content"
    18  	"github.com/containerd/containerd/images"
    19  	"github.com/containerd/containerd/platforms"
    20  	"github.com/containerd/containerd/remotes"
    21  	"github.com/containerd/containerd/remotes/docker"
    22  	"github.com/docker/distribution/manifest/schema2"
    23  	"github.com/docker/distribution/reference"
    24  	"github.com/docker/docker/api/types"
    25  	"github.com/docker/docker/api/types/filters"
    26  	"github.com/docker/docker/dockerversion"
    27  	"github.com/docker/docker/errdefs"
    28  	"github.com/docker/docker/pkg/authorization"
    29  	"github.com/docker/docker/pkg/chrootarchive"
    30  	"github.com/docker/docker/pkg/containerfs"
    31  	"github.com/docker/docker/pkg/pools"
    32  	"github.com/docker/docker/pkg/progress"
    33  	"github.com/docker/docker/pkg/stringid"
    34  	v2 "github.com/docker/docker/plugin/v2"
    35  	"github.com/moby/sys/mount"
    36  	"github.com/opencontainers/go-digest"
    37  	specs "github.com/opencontainers/image-spec/specs-go/v1"
    38  	"github.com/pkg/errors"
    39  	"github.com/sirupsen/logrus"
    40  )
    41  
    42  var acceptedPluginFilterTags = map[string]bool{
    43  	"enabled":    true,
    44  	"capability": true,
    45  }
    46  
     47  // Disable deactivates a plugin. This means resources (volumes, networks) can no longer use it.
    48  func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) error {
    49  	p, err := pm.config.Store.GetV2Plugin(refOrID)
    50  	if err != nil {
    51  		return err
    52  	}
    53  	pm.mu.RLock()
    54  	c := pm.cMap[p]
    55  	pm.mu.RUnlock()
    56  
    57  	if !config.ForceDisable && p.GetRefCount() > 0 {
    58  		return errors.WithStack(inUseError(p.Name()))
    59  	}
    60  
    61  	for _, typ := range p.GetTypes() {
    62  		if typ.Capability == authorization.AuthZApiImplements {
    63  			pm.config.AuthzMiddleware.RemovePlugin(p.Name())
    64  		}
    65  	}
    66  
    67  	if err := pm.disable(p, c); err != nil {
    68  		return err
    69  	}
    70  	pm.publisher.Publish(EventDisable{Plugin: p.PluginObj})
    71  	pm.config.LogPluginEvent(p.GetID(), refOrID, "disable")
    72  	return nil
    73  }
    74  
     75  // Enable activates a plugin, which means it is ready to be used by containers.
    76  func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) error {
    77  	p, err := pm.config.Store.GetV2Plugin(refOrID)
    78  	if err != nil {
    79  		return err
    80  	}
    81  
    82  	c := &controller{timeoutInSecs: config.Timeout}
    83  	if err := pm.enable(p, c, false); err != nil {
    84  		return err
    85  	}
    86  	pm.publisher.Publish(EventEnable{Plugin: p.PluginObj})
    87  	pm.config.LogPluginEvent(p.GetID(), refOrID, "enable")
    88  	return nil
    89  }
    90  
    91  // Inspect examines a plugin config
    92  func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) {
    93  	p, err := pm.config.Store.GetV2Plugin(refOrID)
    94  	if err != nil {
    95  		return nil, err
    96  	}
    97  
    98  	return &p.PluginObj, nil
    99  }
   100  
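         // computePrivileges derives the privileges a plugin requires from its config:
         // non-default network types, host ipc/pid namespaces, host mounts, host devices,
         // 'rwm' access to all devices, and additional Linux capabilities.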
   101  func computePrivileges(c types.PluginConfig) types.PluginPrivileges {
   102  	var privileges types.PluginPrivileges
   103  	if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" {
   104  		privileges = append(privileges, types.PluginPrivilege{
   105  			Name:        "network",
   106  			Description: "permissions to access a network",
   107  			Value:       []string{c.Network.Type},
   108  		})
   109  	}
   110  	if c.IpcHost {
   111  		privileges = append(privileges, types.PluginPrivilege{
   112  			Name:        "host ipc namespace",
   113  			Description: "allow access to host ipc namespace",
   114  			Value:       []string{"true"},
   115  		})
   116  	}
   117  	if c.PidHost {
   118  		privileges = append(privileges, types.PluginPrivilege{
   119  			Name:        "host pid namespace",
   120  			Description: "allow access to host pid namespace",
   121  			Value:       []string{"true"},
   122  		})
   123  	}
   124  	for _, mount := range c.Mounts {
   125  		if mount.Source != nil {
   126  			privileges = append(privileges, types.PluginPrivilege{
   127  				Name:        "mount",
   128  				Description: "host path to mount",
   129  				Value:       []string{*mount.Source},
   130  			})
   131  		}
   132  	}
   133  	for _, device := range c.Linux.Devices {
   134  		if device.Path != nil {
   135  			privileges = append(privileges, types.PluginPrivilege{
   136  				Name:        "device",
   137  				Description: "host device to access",
   138  				Value:       []string{*device.Path},
   139  			})
   140  		}
   141  	}
   142  	if c.Linux.AllowAllDevices {
   143  		privileges = append(privileges, types.PluginPrivilege{
   144  			Name:        "allow-all-devices",
   145  			Description: "allow 'rwm' access to all devices",
   146  			Value:       []string{"true"},
   147  		})
   148  	}
   149  	if len(c.Linux.Capabilities) > 0 {
   150  		privileges = append(privileges, types.PluginPrivilege{
   151  			Name:        "capabilities",
   152  			Description: "list of additional capabilities required",
   153  			Value:       c.Linux.Capabilities,
   154  		})
   155  	}
   156  
   157  	return privileges
   158  }
   159  
   160  // Privileges pulls a plugin config and computes the privileges required to install it.
   161  func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) {
   162  	var (
   163  		config     types.PluginConfig
   164  		configSeen bool
   165  	)
   166  
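         	// h walks the fetched descriptors: for an image manifest it decodes the manifest and
         	// returns its config descriptor so the walk continues; for a plugin config blob it
         	// decodes the config so its privileges can be computed below.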
   167  	h := func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
   168  		switch desc.MediaType {
   169  		case schema2.MediaTypeManifest, specs.MediaTypeImageManifest:
   170  			data, err := content.ReadBlob(ctx, pm.blobStore, desc)
   171  			if err != nil {
   172  				return nil, errors.Wrapf(err, "error reading image manifest from blob store for %s", ref)
   173  			}
   174  
   175  			var m specs.Manifest
   176  			if err := json.Unmarshal(data, &m); err != nil {
   177  				return nil, errors.Wrapf(err, "error unmarshaling image manifest for %s", ref)
   178  			}
   179  			return []specs.Descriptor{m.Config}, nil
   180  		case schema2.MediaTypePluginConfig:
   181  			configSeen = true
   182  			data, err := content.ReadBlob(ctx, pm.blobStore, desc)
   183  			if err != nil {
   184  				return nil, errors.Wrapf(err, "error reading plugin config from blob store for %s", ref)
   185  			}
   186  
   187  			if err := json.Unmarshal(data, &config); err != nil {
   188  				return nil, errors.Wrapf(err, "error unmarshaling plugin config for %s", ref)
   189  			}
   190  		}
   191  
   192  		return nil, nil
   193  	}
   194  
   195  	if err := pm.fetch(ctx, ref, authConfig, progress.DiscardOutput(), metaHeader, images.HandlerFunc(h)); err != nil {
    196  		return types.PluginPrivileges{}, err
   197  	}
   198  
   199  	if !configSeen {
   200  		return types.PluginPrivileges{}, errors.Errorf("did not find plugin config for specified reference %s", ref)
   201  	}
   202  
   203  	return computePrivileges(config), nil
   204  }
   205  
   206  // Upgrade upgrades a plugin
   207  //
   208  // TODO: replace reference package usage with simpler url.Parse semantics
   209  func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) {
   210  	p, err := pm.config.Store.GetV2Plugin(name)
   211  	if err != nil {
   212  		return err
   213  	}
   214  
   215  	if p.IsEnabled() {
   216  		return errors.Wrap(enabledError(p.Name()), "plugin must be disabled before upgrading")
   217  	}
   218  
   219  	// revalidate because Pull is public
   220  	if _, err := reference.ParseNormalizedNamed(name); err != nil {
   221  		return errors.Wrapf(errdefs.InvalidParameter(err), "failed to parse %q", name)
   222  	}
   223  
   224  	pm.muGC.RLock()
   225  	defer pm.muGC.RUnlock()
   226  
   227  	tmpRootFSDir, err := os.MkdirTemp(pm.tmpDir(), ".rootfs")
   228  	if err != nil {
   229  		return errors.Wrap(err, "error creating tmp dir for plugin rootfs")
   230  	}
   231  
   232  	var md fetchMeta
   233  
   234  	ctx, cancel := context.WithCancel(ctx)
   235  	out, waitProgress := setupProgressOutput(outStream, cancel)
   236  	defer waitProgress()
   237  
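         	// Fetch the plugin content: record the manifest/config digests, walk child descriptors
         	// into the local blob store, and unpack the layers into the temporary rootfs directory.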
   238  	if err := pm.fetch(ctx, ref, authConfig, out, metaHeader, storeFetchMetadata(&md), childrenHandler(pm.blobStore), applyLayer(pm.blobStore, tmpRootFSDir, out)); err != nil {
   239  		return err
   240  	}
   241  	pm.config.LogPluginEvent(reference.FamiliarString(ref), name, "pull")
   242  
   243  	if err := validateFetchedMetadata(md); err != nil {
   244  		return err
   245  	}
   246  
   247  	if err := pm.upgradePlugin(p, md.config, md.manifest, md.blobs, tmpRootFSDir, &privileges); err != nil {
   248  		return err
   249  	}
   250  	p.PluginObj.PluginReference = ref.String()
   251  	return nil
   252  }
   253  
    254  // Pull pulls a plugin, checks if the correct privileges are provided, and installs the plugin.
   255  //
   256  // TODO: replace reference package usage with simpler url.Parse semantics
   257  func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer, opts ...CreateOpt) (err error) {
   258  	pm.muGC.RLock()
   259  	defer pm.muGC.RUnlock()
   260  
   261  	// revalidate because Pull is public
   262  	nameref, err := reference.ParseNormalizedNamed(name)
   263  	if err != nil {
   264  		return errors.Wrapf(errdefs.InvalidParameter(err), "failed to parse %q", name)
   265  	}
   266  	name = reference.FamiliarString(reference.TagNameOnly(nameref))
   267  
   268  	if err := pm.config.Store.validateName(name); err != nil {
   269  		return errdefs.InvalidParameter(err)
   270  	}
   271  
   272  	tmpRootFSDir, err := os.MkdirTemp(pm.tmpDir(), ".rootfs")
   273  	if err != nil {
    274  		return errors.Wrap(errdefs.System(err), "error preparing pull")
   275  	}
   276  	defer os.RemoveAll(tmpRootFSDir)
   277  
   278  	var md fetchMeta
   279  
   280  	ctx, cancel := context.WithCancel(ctx)
   281  	out, waitProgress := setupProgressOutput(outStream, cancel)
   282  	defer waitProgress()
   283  
   284  	if err := pm.fetch(ctx, ref, authConfig, out, metaHeader, storeFetchMetadata(&md), childrenHandler(pm.blobStore), applyLayer(pm.blobStore, tmpRootFSDir, out)); err != nil {
   285  		return err
   286  	}
   287  	pm.config.LogPluginEvent(reference.FamiliarString(ref), name, "pull")
   288  
   289  	if err := validateFetchedMetadata(md); err != nil {
   290  		return err
   291  	}
   292  
   293  	refOpt := func(p *v2.Plugin) {
   294  		p.PluginObj.PluginReference = ref.String()
   295  	}
   296  	optsList := make([]CreateOpt, 0, len(opts)+1)
   297  	optsList = append(optsList, opts...)
   298  	optsList = append(optsList, refOpt)
   299  
   300  	// TODO: tmpRootFSDir is empty but should have layers in it
   301  	p, err := pm.createPlugin(name, md.config, md.manifest, md.blobs, tmpRootFSDir, &privileges, optsList...)
   302  	if err != nil {
   303  		return err
   304  	}
   305  
   306  	pm.publisher.Publish(EventCreate{Plugin: p.PluginObj})
   307  
   308  	return nil
   309  }
   310  
   311  // List displays the list of plugins and associated metadata.
   312  func (pm *Manager) List(pluginFilters filters.Args) ([]types.Plugin, error) {
   313  	if err := pluginFilters.Validate(acceptedPluginFilterTags); err != nil {
   314  		return nil, err
   315  	}
   316  
   317  	enabledOnly := false
   318  	disabledOnly := false
   319  	if pluginFilters.Contains("enabled") {
   320  		if pluginFilters.ExactMatch("enabled", "true") {
   321  			enabledOnly = true
   322  		} else if pluginFilters.ExactMatch("enabled", "false") {
   323  			disabledOnly = true
   324  		} else {
   325  			return nil, invalidFilter{"enabled", pluginFilters.Get("enabled")}
   326  		}
   327  	}
   328  
   329  	plugins := pm.config.Store.GetAll()
   330  	out := make([]types.Plugin, 0, len(plugins))
   331  
   332  next:
   333  	for _, p := range plugins {
   334  		if enabledOnly && !p.PluginObj.Enabled {
   335  			continue
   336  		}
   337  		if disabledOnly && p.PluginObj.Enabled {
   338  			continue
   339  		}
   340  		if pluginFilters.Contains("capability") {
   341  			for _, f := range p.GetTypes() {
   342  				if !pluginFilters.Match("capability", f.Capability) {
   343  					continue next
   344  				}
   345  			}
   346  		}
   347  		out = append(out, p.PluginObj)
   348  	}
   349  	return out, nil
   350  }
   351  
   352  // Push pushes a plugin to the registry.
   353  func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, outStream io.Writer) error {
   354  	p, err := pm.config.Store.GetV2Plugin(name)
   355  	if err != nil {
   356  		return err
   357  	}
   358  
   359  	ref, err := reference.ParseNormalizedNamed(p.Name())
   360  	if err != nil {
   361  		return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name())
   362  	}
   363  
   364  	statusTracker := docker.NewInMemoryTracker()
   365  
   366  	resolver, err := pm.newResolver(ctx, statusTracker, authConfig, metaHeader, false)
   367  	if err != nil {
   368  		return err
   369  	}
   370  
   371  	pusher, err := resolver.Pusher(ctx, ref.String())
   372  	if err != nil {
   373  		return errors.Wrap(err, "error creating plugin pusher")
   374  	}
   375  
   376  	pj := newPushJobs(statusTracker)
   377  
   378  	ctx, cancel := context.WithCancel(ctx)
   379  	out, waitProgress := setupProgressOutput(outStream, cancel)
   380  	defer waitProgress()
   381  
   382  	progressHandler := images.HandlerFunc(func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
   383  		logrus.WithField("mediaType", desc.MediaType).WithField("digest", desc.Digest.String()).Debug("Preparing to push plugin layer")
   384  		id := stringid.TruncateID(desc.Digest.String())
   385  		pj.add(remotes.MakeRefKey(ctx, desc), id)
   386  		progress.Update(out, id, "Preparing")
   387  		return nil, nil
   388  	})
   389  
   390  	desc, err := pm.getManifestDescriptor(ctx, p)
   391  	if err != nil {
   392  		return errors.Wrap(err, "error reading plugin manifest")
   393  	}
   394  
   395  	progress.Messagef(out, "", "The push refers to repository [%s]", reference.FamiliarName(ref))
   396  
   397  	// TODO: If a layer already exists on the registry, the progress output just says "Preparing"
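         	// Poll the push status tracker on a short interval and translate it into progress
         	// updates for the client until the push context is done.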
   398  	go func() {
   399  		timer := time.NewTimer(100 * time.Millisecond)
   400  		defer timer.Stop()
   401  		if !timer.Stop() {
   402  			<-timer.C
   403  		}
   404  		var statuses []contentStatus
   405  		for {
   406  			timer.Reset(100 * time.Millisecond)
   407  			select {
   408  			case <-ctx.Done():
   409  				return
   410  			case <-timer.C:
   411  				statuses = pj.status()
   412  			}
   413  
   414  			for _, s := range statuses {
   415  				out.WriteProgress(progress.Progress{ID: s.Ref, Current: s.Offset, Total: s.Total, Action: s.Status, LastUpdate: s.Offset == s.Total})
   416  			}
   417  		}
   418  	}()
   419  
   420  	// Make sure we can authenticate the request since the auth scope for plugin repos is different than a normal repo.
   421  	ctx = docker.WithScope(ctx, scope(ref, true))
   422  	if err := remotes.PushContent(ctx, pusher, desc, pm.blobStore, nil, nil, func(h images.Handler) images.Handler {
   423  		return images.Handlers(progressHandler, h)
   424  	}); err != nil {
   425  		// Try fallback to http.
   426  		// This is needed because the containerd pusher will only attempt the first registry config we pass, which would
   427  		// typically be https.
   428  		// If there are no http-only host configs found we'll error out anyway.
   429  		resolver, _ := pm.newResolver(ctx, statusTracker, authConfig, metaHeader, true)
   430  		if resolver != nil {
   431  			pusher, _ := resolver.Pusher(ctx, ref.String())
   432  			if pusher != nil {
    433  				logrus.WithField("ref", ref).Debug("Re-attempting push with http-fallback")
   434  				err2 := remotes.PushContent(ctx, pusher, desc, pm.blobStore, nil, nil, func(h images.Handler) images.Handler {
   435  					return images.Handlers(progressHandler, h)
   436  				})
   437  				if err2 == nil {
   438  					err = nil
   439  				} else {
   440  					logrus.WithError(err2).WithField("ref", ref).Debug("Error while attempting push with http-fallback")
   441  				}
   442  			}
   443  		}
   444  		if err != nil {
   445  			return errors.Wrap(err, "error pushing plugin")
   446  		}
   447  	}
   448  
   449  	// For blobs that already exist in the registry we need to make sure to update the progress otherwise it will just say "pending"
   450  	// TODO: How to check if the layer already exists? Is it worth it?
   451  	for _, j := range pj.jobs {
   452  		progress.Update(out, pj.names[j], "Upload complete")
   453  	}
   454  
   455  	// Signal the client for content trust verification
   456  	progress.Aux(out, types.PushResult{Tag: ref.(reference.Tagged).Tag(), Digest: desc.Digest.String(), Size: int(desc.Size)})
   457  
   458  	return nil
   459  }
   460  
    461  // manifest wraps an OCI manifest with an explicit mediaType field.
    462  // Historically the registry does not support plugins unless the media type on the manifest is specifically schema2.MediaTypeManifest,
    463  // so the OCI manifest media type is not supported.
    464  // Additionally, the docker schema2 manifest is validated to carry a mediaType on the manifest itself,
    465  // not only on the descriptor that references it.
    466  // The OCI manifest type does not have this field, hence the wrapper below.
   467  type manifest struct {
   468  	specs.Manifest
   469  	MediaType string `json:"mediaType,omitempty"`
   470  }
   471  
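         // buildManifest assembles a docker schema2 manifest from the plugin config digest and
         // layer digests, looking up blob sizes in the given content manager.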
   472  func buildManifest(ctx context.Context, s content.Manager, config digest.Digest, layers []digest.Digest) (manifest, error) {
   473  	var m manifest
   474  	m.MediaType = images.MediaTypeDockerSchema2Manifest
   475  	m.SchemaVersion = 2
   476  
   477  	configInfo, err := s.Info(ctx, config)
   478  	if err != nil {
   479  		return m, errors.Wrapf(err, "error reading plugin config content for digest %s", config)
   480  	}
   481  	m.Config = specs.Descriptor{
   482  		MediaType: mediaTypePluginConfig,
   483  		Size:      configInfo.Size,
   484  		Digest:    configInfo.Digest,
   485  	}
   486  
   487  	for _, l := range layers {
   488  		info, err := s.Info(ctx, l)
   489  		if err != nil {
   490  			return m, errors.Wrapf(err, "error fetching info for content digest %s", l)
   491  		}
   492  		m.Layers = append(m.Layers, specs.Descriptor{
   493  			MediaType: images.MediaTypeDockerSchema2LayerGzip, // TODO: This is assuming everything is a gzip compressed layer, but that may not be true.
   494  			Digest:    l,
   495  			Size:      info.Size,
   496  		})
   497  	}
   498  	return m, nil
   499  }
   500  
   501  // getManifestDescriptor gets the OCI descriptor for a manifest
   502  // It will generate a manifest if one does not exist
   503  func (pm *Manager) getManifestDescriptor(ctx context.Context, p *v2.Plugin) (specs.Descriptor, error) {
   504  	logger := logrus.WithField("plugin", p.Name()).WithField("digest", p.Manifest)
   505  	if p.Manifest != "" {
   506  		info, err := pm.blobStore.Info(ctx, p.Manifest)
   507  		if err == nil {
   508  			desc := specs.Descriptor{
   509  				Size:      info.Size,
   510  				Digest:    info.Digest,
   511  				MediaType: images.MediaTypeDockerSchema2Manifest,
   512  			}
   513  			return desc, nil
   514  		}
   515  		logger.WithError(err).Debug("Could not find plugin manifest in content store")
   516  	} else {
   517  		logger.Info("Plugin does not have manifest digest")
   518  	}
   519  	logger.Info("Building a new plugin manifest")
   520  
   521  	manifest, err := buildManifest(ctx, pm.blobStore, p.Config, p.Blobsums)
   522  	if err != nil {
   523  		return specs.Descriptor{}, err
   524  	}
   525  
   526  	desc, err := writeManifest(ctx, pm.blobStore, &manifest)
   527  	if err != nil {
   528  		return desc, err
   529  	}
   530  
   531  	if err := pm.save(p); err != nil {
   532  		logger.WithError(err).Error("Could not save plugin with manifest digest")
   533  	}
   534  	return desc, nil
   535  }
   536  
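         // writeManifest marshals the manifest, writes it to the content store, and returns a
         // descriptor for the stored blob.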
   537  func writeManifest(ctx context.Context, cs content.Store, m *manifest) (specs.Descriptor, error) {
   538  	platform := platforms.DefaultSpec()
   539  	desc := specs.Descriptor{
   540  		MediaType: images.MediaTypeDockerSchema2Manifest,
   541  		Platform:  &platform,
   542  	}
   543  	data, err := json.Marshal(m)
   544  	if err != nil {
   545  		return desc, errors.Wrap(err, "error encoding manifest")
   546  	}
   547  	desc.Digest = digest.FromBytes(data)
   548  	desc.Size = int64(len(data))
   549  
   550  	if err := content.WriteBlob(ctx, cs, remotes.MakeRefKey(ctx, desc), bytes.NewReader(data), desc); err != nil {
   551  		return desc, errors.Wrap(err, "error writing plugin manifest")
   552  	}
   553  	return desc, nil
   554  }
   555  
    556  // Remove deletes the plugin's root directory.
   557  func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error {
   558  	p, err := pm.config.Store.GetV2Plugin(name)
   559  	pm.mu.RLock()
   560  	c := pm.cMap[p]
   561  	pm.mu.RUnlock()
   562  
   563  	if err != nil {
   564  		return err
   565  	}
   566  
   567  	if !config.ForceRemove {
   568  		if p.GetRefCount() > 0 {
   569  			return inUseError(p.Name())
   570  		}
   571  		if p.IsEnabled() {
   572  			return enabledError(p.Name())
   573  		}
   574  	}
   575  
   576  	if p.IsEnabled() {
   577  		if err := pm.disable(p, c); err != nil {
   578  			logrus.Errorf("failed to disable plugin '%s': %s", p.Name(), err)
   579  		}
   580  	}
   581  
   582  	defer func() {
   583  		go pm.GC()
   584  	}()
   585  
   586  	id := p.GetID()
   587  	pluginDir := filepath.Join(pm.config.Root, id)
   588  
   589  	if err := mount.RecursiveUnmount(pluginDir); err != nil {
   590  		return errors.Wrap(err, "error unmounting plugin data")
   591  	}
   592  
   593  	if err := atomicRemoveAll(pluginDir); err != nil {
   594  		return err
   595  	}
   596  
   597  	pm.config.Store.Remove(p)
   598  	pm.config.LogPluginEvent(id, name, "remove")
   599  	pm.publisher.Publish(EventRemove{Plugin: p.PluginObj})
   600  	return nil
   601  }
   602  
   603  // Set sets plugin args
   604  func (pm *Manager) Set(name string, args []string) error {
   605  	p, err := pm.config.Store.GetV2Plugin(name)
   606  	if err != nil {
   607  		return err
   608  	}
   609  	if err := p.Set(args); err != nil {
   610  		return err
   611  	}
   612  	return pm.save(p)
   613  }
   614  
    615  // CreateFromContext creates a plugin from the given build context (a tar stream
    616  // containing both the rootfs and the config.json) and a repo name with an optional tag.
   617  func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) (err error) {
   618  	pm.muGC.RLock()
   619  	defer pm.muGC.RUnlock()
   620  
   621  	ref, err := reference.ParseNormalizedNamed(options.RepoName)
   622  	if err != nil {
   623  		return errors.Wrapf(err, "failed to parse reference %v", options.RepoName)
   624  	}
   625  	if _, ok := ref.(reference.Canonical); ok {
   626  		return errors.Errorf("canonical references are not permitted")
   627  	}
   628  	name := reference.FamiliarString(reference.TagNameOnly(ref))
   629  
   630  	if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin()
   631  		return err
   632  	}
   633  
   634  	tmpRootFSDir, err := os.MkdirTemp(pm.tmpDir(), ".rootfs")
   635  	if err != nil {
   636  		return errors.Wrap(err, "failed to create temp directory")
   637  	}
   638  	defer os.RemoveAll(tmpRootFSDir)
   639  
   640  	var configJSON []byte
   641  	rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON)
   642  
   643  	rootFSBlob, err := pm.blobStore.Writer(ctx, content.WithRef(name))
   644  	if err != nil {
   645  		return err
   646  	}
   647  	defer rootFSBlob.Close()
   648  
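         	// Tee the rootfs tar stream: one copy is unpacked into the temporary rootfs directory,
         	// the other is gzip-compressed into the blob store to become the plugin's layer.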
   649  	gzw := gzip.NewWriter(rootFSBlob)
   650  	rootFSReader := io.TeeReader(rootFS, gzw)
   651  
   652  	if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil {
   653  		return err
   654  	}
   655  	if err := rootFS.Close(); err != nil {
   656  		return err
   657  	}
   658  
   659  	if configJSON == nil {
   660  		return errors.New("config not found")
   661  	}
   662  
   663  	if err := gzw.Close(); err != nil {
   664  		return errors.Wrap(err, "error closing gzip writer")
   665  	}
   666  
   667  	var config types.PluginConfig
   668  	if err := json.Unmarshal(configJSON, &config); err != nil {
   669  		return errors.Wrap(err, "failed to parse config")
   670  	}
   671  
   672  	if err := pm.validateConfig(config); err != nil {
   673  		return err
   674  	}
   675  
   676  	pm.mu.Lock()
   677  	defer pm.mu.Unlock()
   678  
   679  	if err := rootFSBlob.Commit(ctx, 0, ""); err != nil {
   680  		return err
   681  	}
   682  	defer func() {
   683  		if err != nil {
   684  			go pm.GC()
   685  		}
   686  	}()
   687  
   688  	config.Rootfs = &types.PluginConfigRootfs{
   689  		Type:    "layers",
   690  		DiffIds: []string{rootFSBlob.Digest().String()},
   691  	}
   692  
   693  	config.DockerVersion = dockerversion.Version
   694  
   695  	configBlob, err := pm.blobStore.Writer(ctx, content.WithRef(name+"-config.json"))
   696  	if err != nil {
   697  		return err
   698  	}
   699  	defer configBlob.Close()
   700  	if err := json.NewEncoder(configBlob).Encode(config); err != nil {
   701  		return errors.Wrap(err, "error encoding json config")
   702  	}
   703  	if err := configBlob.Commit(ctx, 0, ""); err != nil {
   704  		return err
   705  	}
   706  
   707  	configDigest := configBlob.Digest()
   708  	layers := []digest.Digest{rootFSBlob.Digest()}
   709  
   710  	manifest, err := buildManifest(ctx, pm.blobStore, configDigest, layers)
   711  	if err != nil {
   712  		return err
   713  	}
   714  	desc, err := writeManifest(ctx, pm.blobStore, &manifest)
   715  	if err != nil {
    716  		return err
   717  	}
   718  
   719  	p, err := pm.createPlugin(name, configDigest, desc.Digest, layers, tmpRootFSDir, nil)
   720  	if err != nil {
   721  		return err
   722  	}
   723  	p.PluginObj.PluginReference = name
   724  
   725  	pm.publisher.Publish(EventCreate{Plugin: p.PluginObj})
   726  	pm.config.LogPluginEvent(p.PluginObj.ID, name, "create")
   727  
   728  	return nil
   729  }
   730  
   731  func (pm *Manager) validateConfig(config types.PluginConfig) error {
   732  	return nil // TODO:
   733  }
   734  
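         // splitConfigRootFSFromTar streams the build-context tar: it captures the plugin config
         // file into config and returns a reader over a new tar containing only the entries under
         // rootFSFileName, with that prefix stripped from their paths.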
   735  func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser {
   736  	pr, pw := io.Pipe()
   737  	go func() {
   738  		tarReader := tar.NewReader(in)
   739  		tarWriter := tar.NewWriter(pw)
   740  		defer in.Close()
   741  
   742  		hasRootFS := false
   743  
   744  		for {
   745  			hdr, err := tarReader.Next()
   746  			if err == io.EOF {
   747  				if !hasRootFS {
   748  					pw.CloseWithError(errors.Wrap(err, "no rootfs found"))
   749  					return
   750  				}
   751  				// Signals end of archive.
   752  				tarWriter.Close()
   753  				pw.Close()
   754  				return
   755  			}
   756  			if err != nil {
   757  				pw.CloseWithError(errors.Wrap(err, "failed to read from tar"))
   758  				return
   759  			}
   760  
   761  			content := io.Reader(tarReader)
   762  			name := path.Clean(hdr.Name)
   763  			if path.IsAbs(name) {
   764  				name = name[1:]
   765  			}
   766  			if name == configFileName {
   767  				dt, err := io.ReadAll(content)
   768  				if err != nil {
   769  					pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName))
   770  					return
   771  				}
   772  				*config = dt
   773  			}
   774  			if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName {
   775  				hdr.Name = path.Clean(path.Join(parts[1:]...))
   776  				if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") {
   777  					hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:]
   778  				}
   779  				if err := tarWriter.WriteHeader(hdr); err != nil {
   780  					pw.CloseWithError(errors.Wrap(err, "error writing tar header"))
   781  					return
   782  				}
   783  				if _, err := pools.Copy(tarWriter, content); err != nil {
   784  					pw.CloseWithError(errors.Wrap(err, "error copying tar data"))
   785  					return
   786  				}
   787  				hasRootFS = true
   788  			} else {
   789  				io.Copy(io.Discard, content)
   790  			}
   791  		}
   792  	}()
   793  	return pr
   794  }
   795  
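         // atomicRemoveAll renames dir to a "-removing" sibling before deleting it, so that a
         // partially removed plugin directory is never left behind under its original name.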
   796  func atomicRemoveAll(dir string) error {
   797  	renamed := dir + "-removing"
   798  
   799  	err := os.Rename(dir, renamed)
   800  	switch {
   801  	case os.IsNotExist(err), err == nil:
   802  		// even if `dir` doesn't exist, we can still try and remove `renamed`
   803  	case os.IsExist(err):
   804  		// Some previous remove failed, check if the origin dir exists
   805  		if e := containerfs.EnsureRemoveAll(renamed); e != nil {
    806  			return errors.Wrap(e, "rename target already exists and could not be removed")
   807  		}
   808  		if _, err := os.Stat(dir); os.IsNotExist(err) {
   809  			// origin doesn't exist, nothing left to do
   810  			return nil
   811  		}
   812  
   813  		// attempt to rename again
   814  		if err := os.Rename(dir, renamed); err != nil {
   815  			return errors.Wrap(err, "failed to rename dir for atomic removal")
   816  		}
   817  	default:
   818  		return errors.Wrap(err, "failed to rename dir for atomic removal")
   819  	}
   820  
   821  	if err := containerfs.EnsureRemoveAll(renamed); err != nil {
   822  		os.Rename(renamed, dir)
   823  		return err
   824  	}
   825  	return nil
   826  }