github.com/kaisenlinux/docker.io@v0.0.0-20230510090727-ea55db55fac7/engine/plugin/backend_linux.go

package plugin // import "github.com/docker/docker/plugin"

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"context"
	"encoding/json"
	"io"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	"github.com/containerd/containerd/remotes"
	"github.com/containerd/containerd/remotes/docker"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/pkg/authorization"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/system"
	v2 "github.com/docker/docker/plugin/v2"
	"github.com/moby/sys/mount"
	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var acceptedPluginFilterTags = map[string]bool{
	"enabled":    true,
	"capability": true,
}

// Disable deactivates a plugin. This means resources (volumes, networks) can no longer use it.
func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) error {
	p, err := pm.config.Store.GetV2Plugin(refOrID)
	if err != nil {
		return err
	}
	pm.mu.RLock()
	c := pm.cMap[p]
	pm.mu.RUnlock()

	if !config.ForceDisable && p.GetRefCount() > 0 {
		return errors.WithStack(inUseError(p.Name()))
	}

	for _, typ := range p.GetTypes() {
		if typ.Capability == authorization.AuthZApiImplements {
			pm.config.AuthzMiddleware.RemovePlugin(p.Name())
		}
	}

	if err := pm.disable(p, c); err != nil {
		return err
	}
	pm.publisher.Publish(EventDisable{Plugin: p.PluginObj})
	pm.config.LogPluginEvent(p.GetID(), refOrID, "disable")
	return nil
}
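// Disable and Enable back the `docker plugin disable` and `docker plugin enable`
// CLI commands (the /plugins/{name}/disable and /plugins/{name}/enable API routes).
// A rough client-side sketch, using the Go client and api/types packages; the
// plugin name is only an example:
//
//	ctx := context.Background()
//	cli, _ := client.NewClientWithOpts(client.FromEnv)
//	_ = cli.PluginDisable(ctx, "sample-plugin:latest", types.PluginDisableOptions{Force: false})
//	_ = cli.PluginEnable(ctx, "sample-plugin:latest", types.PluginEnableOptions{Timeout: 30})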
// Enable activates a plugin, which means it is ready to be used by containers.
func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) error {
	p, err := pm.config.Store.GetV2Plugin(refOrID)
	if err != nil {
		return err
	}

	c := &controller{timeoutInSecs: config.Timeout}
	if err := pm.enable(p, c, false); err != nil {
		return err
	}
	pm.publisher.Publish(EventEnable{Plugin: p.PluginObj})
	pm.config.LogPluginEvent(p.GetID(), refOrID, "enable")
	return nil
}

// Inspect examines a plugin config
func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) {
	p, err := pm.config.Store.GetV2Plugin(refOrID)
	if err != nil {
		return nil, err
	}

	return &p.PluginObj, nil
}

func computePrivileges(c types.PluginConfig) types.PluginPrivileges {
	var privileges types.PluginPrivileges
	if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "network",
			Description: "permissions to access a network",
			Value:       []string{c.Network.Type},
		})
	}
	if c.IpcHost {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "host ipc namespace",
			Description: "allow access to host ipc namespace",
			Value:       []string{"true"},
		})
	}
	if c.PidHost {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "host pid namespace",
			Description: "allow access to host pid namespace",
			Value:       []string{"true"},
		})
	}
	for _, mount := range c.Mounts {
		if mount.Source != nil {
			privileges = append(privileges, types.PluginPrivilege{
				Name:        "mount",
				Description: "host path to mount",
				Value:       []string{*mount.Source},
			})
		}
	}
	for _, device := range c.Linux.Devices {
		if device.Path != nil {
			privileges = append(privileges, types.PluginPrivilege{
				Name:        "device",
				Description: "host device to access",
				Value:       []string{*device.Path},
			})
		}
	}
	if c.Linux.AllowAllDevices {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "allow-all-devices",
			Description: "allow 'rwm' access to all devices",
			Value:       []string{"true"},
		})
	}
	if len(c.Linux.Capabilities) > 0 {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "capabilities",
			Description: "list of additional capabilities required",
			Value:       c.Linux.Capabilities,
		})
	}

	return privileges
}
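// For illustration, a plugin config that sets IpcHost and requests the
// CAP_SYS_ADMIN capability produces privileges roughly like:
//
//	[
//		{Name: "host ipc namespace", Description: "allow access to host ipc namespace", Value: ["true"]},
//		{Name: "capabilities", Description: "list of additional capabilities required", Value: ["CAP_SYS_ADMIN"]},
//	]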
// Privileges pulls a plugin config and computes the privileges required to install it.
func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) {
	var (
		config     types.PluginConfig
		configSeen bool
	)

	h := func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
		switch desc.MediaType {
		case schema2.MediaTypeManifest, specs.MediaTypeImageManifest:
			data, err := content.ReadBlob(ctx, pm.blobStore, desc)
			if err != nil {
				return nil, errors.Wrapf(err, "error reading image manifest from blob store for %s", ref)
			}

			var m specs.Manifest
			if err := json.Unmarshal(data, &m); err != nil {
				return nil, errors.Wrapf(err, "error unmarshaling image manifest for %s", ref)
			}
			return []specs.Descriptor{m.Config}, nil
		case schema2.MediaTypePluginConfig:
			configSeen = true
			data, err := content.ReadBlob(ctx, pm.blobStore, desc)
			if err != nil {
				return nil, errors.Wrapf(err, "error reading plugin config from blob store for %s", ref)
			}

			if err := json.Unmarshal(data, &config); err != nil {
				return nil, errors.Wrapf(err, "error unmarshaling plugin config for %s", ref)
			}
		}

		return nil, nil
	}

	if err := pm.fetch(ctx, ref, authConfig, progress.DiscardOutput(), metaHeader, images.HandlerFunc(h)); err != nil {
		return types.PluginPrivileges{}, nil
	}

	if !configSeen {
		return types.PluginPrivileges{}, errors.Errorf("did not find plugin config for specified reference %s", ref)
	}

	return computePrivileges(config), nil
}

// Upgrade upgrades a plugin
//
// TODO: replace reference package usage with simpler url.Parse semantics
func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) {
	p, err := pm.config.Store.GetV2Plugin(name)
	if err != nil {
		return err
	}

	if p.IsEnabled() {
		return errors.Wrap(enabledError(p.Name()), "plugin must be disabled before upgrading")
	}

	// revalidate because Pull is public
	if _, err := reference.ParseNormalizedNamed(name); err != nil {
		return errors.Wrapf(errdefs.InvalidParameter(err), "failed to parse %q", name)
	}

	pm.muGC.RLock()
	defer pm.muGC.RUnlock()

	tmpRootFSDir, err := os.MkdirTemp(pm.tmpDir(), ".rootfs")
	if err != nil {
		return errors.Wrap(err, "error creating tmp dir for plugin rootfs")
	}

	var md fetchMeta

	ctx, cancel := context.WithCancel(ctx)
	out, waitProgress := setupProgressOutput(outStream, cancel)
	defer waitProgress()

	if err := pm.fetch(ctx, ref, authConfig, out, metaHeader, storeFetchMetadata(&md), childrenHandler(pm.blobStore), applyLayer(pm.blobStore, tmpRootFSDir, out)); err != nil {
		return err
	}
	pm.config.LogPluginEvent(reference.FamiliarString(ref), name, "pull")

	if err := validateFetchedMetadata(md); err != nil {
		return err
	}

	if err := pm.upgradePlugin(p, md.config, md.manifest, md.blobs, tmpRootFSDir, &privileges); err != nil {
		return err
	}
	p.PluginObj.PluginReference = ref.String()
	return nil
}
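// Both Upgrade (above) and Pull (below) drive the same fetch pipeline, built from
// helpers defined elsewhere in this package: storeFetchMetadata records the fetched
// manifest, config, and layer digests into a fetchMeta, childrenHandler resolves a
// manifest to its config and layer descriptors via the blob store, and applyLayer
// unpacks each layer into the temporary rootfs directory as it is written.
// validateFetchedMetadata then sanity-checks the collected metadata before the
// plugin is created or upgraded.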
// Pull pulls a plugin, checks if the correct privileges are provided, and installs the plugin.
//
// TODO: replace reference package usage with simpler url.Parse semantics
func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer, opts ...CreateOpt) (err error) {
	pm.muGC.RLock()
	defer pm.muGC.RUnlock()

	// revalidate because Pull is public
	nameref, err := reference.ParseNormalizedNamed(name)
	if err != nil {
		return errors.Wrapf(errdefs.InvalidParameter(err), "failed to parse %q", name)
	}
	name = reference.FamiliarString(reference.TagNameOnly(nameref))

	if err := pm.config.Store.validateName(name); err != nil {
		return errdefs.InvalidParameter(err)
	}

	tmpRootFSDir, err := os.MkdirTemp(pm.tmpDir(), ".rootfs")
	if err != nil {
		return errors.Wrap(errdefs.System(err), "error preparing upgrade")
	}
	defer os.RemoveAll(tmpRootFSDir)

	var md fetchMeta

	ctx, cancel := context.WithCancel(ctx)
	out, waitProgress := setupProgressOutput(outStream, cancel)
	defer waitProgress()

	if err := pm.fetch(ctx, ref, authConfig, out, metaHeader, storeFetchMetadata(&md), childrenHandler(pm.blobStore), applyLayer(pm.blobStore, tmpRootFSDir, out)); err != nil {
		return err
	}
	pm.config.LogPluginEvent(reference.FamiliarString(ref), name, "pull")

	if err := validateFetchedMetadata(md); err != nil {
		return err
	}

	refOpt := func(p *v2.Plugin) {
		p.PluginObj.PluginReference = ref.String()
	}
	optsList := make([]CreateOpt, 0, len(opts)+1)
	optsList = append(optsList, opts...)
	optsList = append(optsList, refOpt)

	// TODO: tmpRootFSDir is empty but should have layers in it
	p, err := pm.createPlugin(name, md.config, md.manifest, md.blobs, tmpRootFSDir, &privileges, optsList...)
	if err != nil {
		return err
	}

	pm.publisher.Publish(EventCreate{Plugin: p.PluginObj})

	return nil
}

// List displays the list of plugins and associated metadata.
func (pm *Manager) List(pluginFilters filters.Args) ([]types.Plugin, error) {
	if err := pluginFilters.Validate(acceptedPluginFilterTags); err != nil {
		return nil, err
	}

	enabledOnly := false
	disabledOnly := false
	if pluginFilters.Contains("enabled") {
		if pluginFilters.ExactMatch("enabled", "true") {
			enabledOnly = true
		} else if pluginFilters.ExactMatch("enabled", "false") {
			disabledOnly = true
		} else {
			return nil, invalidFilter{"enabled", pluginFilters.Get("enabled")}
		}
	}

	plugins := pm.config.Store.GetAll()
	out := make([]types.Plugin, 0, len(plugins))

next:
	for _, p := range plugins {
		if enabledOnly && !p.PluginObj.Enabled {
			continue
		}
		if disabledOnly && p.PluginObj.Enabled {
			continue
		}
		if pluginFilters.Contains("capability") {
			for _, f := range p.GetTypes() {
				if !pluginFilters.Match("capability", f.Capability) {
					continue next
				}
			}
		}
		out = append(out, p.PluginObj)
	}
	return out, nil
}
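// List backs `docker plugin ls`; only the filters listed in acceptedPluginFilterTags
// are accepted. A hypothetical invocation exercising both filters:
//
//	docker plugin ls --filter enabled=true --filter capability=volumedriver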
// Push pushes a plugin to the registry.
func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, outStream io.Writer) error {
	p, err := pm.config.Store.GetV2Plugin(name)
	if err != nil {
		return err
	}

	ref, err := reference.ParseNormalizedNamed(p.Name())
	if err != nil {
		return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name())
	}

	statusTracker := docker.NewInMemoryTracker()

	resolver, err := pm.newResolver(ctx, statusTracker, authConfig, metaHeader, false)
	if err != nil {
		return err
	}

	pusher, err := resolver.Pusher(ctx, ref.String())
	if err != nil {
		return errors.Wrap(err, "error creating plugin pusher")
	}

	pj := newPushJobs(statusTracker)

	ctx, cancel := context.WithCancel(ctx)
	out, waitProgress := setupProgressOutput(outStream, cancel)
	defer waitProgress()

	progressHandler := images.HandlerFunc(func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
		logrus.WithField("mediaType", desc.MediaType).WithField("digest", desc.Digest.String()).Debug("Preparing to push plugin layer")
		id := stringid.TruncateID(desc.Digest.String())
		pj.add(remotes.MakeRefKey(ctx, desc), id)
		progress.Update(out, id, "Preparing")
		return nil, nil
	})

	desc, err := pm.getManifestDescriptor(ctx, p)
	if err != nil {
		return errors.Wrap(err, "error reading plugin manifest")
	}

	progress.Messagef(out, "", "The push refers to repository [%s]", reference.FamiliarName(ref))

	// TODO: If a layer already exists on the registry, the progress output just says "Preparing"
	go func() {
		timer := time.NewTimer(100 * time.Millisecond)
		defer timer.Stop()
		if !timer.Stop() {
			<-timer.C
		}
		var statuses []contentStatus
		for {
			timer.Reset(100 * time.Millisecond)
			select {
			case <-ctx.Done():
				return
			case <-timer.C:
				statuses = pj.status()
			}

			for _, s := range statuses {
				out.WriteProgress(progress.Progress{ID: s.Ref, Current: s.Offset, Total: s.Total, Action: s.Status, LastUpdate: s.Offset == s.Total})
			}
		}
	}()

	// Make sure we can authenticate the request, since the auth scope for plugin repos is different from that of a normal repo.
	ctx = docker.WithScope(ctx, scope(ref, true))
	if err := remotes.PushContent(ctx, pusher, desc, pm.blobStore, nil, func(h images.Handler) images.Handler {
		return images.Handlers(progressHandler, h)
	}); err != nil {
		// Try fallback to http.
		// This is needed because the containerd pusher will only attempt the first registry config we pass, which would
		// typically be https.
		// If there are no http-only host configs found we'll error out anyway.
		resolver, _ := pm.newResolver(ctx, statusTracker, authConfig, metaHeader, true)
		if resolver != nil {
			pusher, _ := resolver.Pusher(ctx, ref.String())
			if pusher != nil {
				logrus.WithField("ref", ref).Debug("Re-attempting push with http-fallback")
				err2 := remotes.PushContent(ctx, pusher, desc, pm.blobStore, nil, func(h images.Handler) images.Handler {
					return images.Handlers(progressHandler, h)
				})
				if err2 == nil {
					err = nil
				} else {
					logrus.WithError(err2).WithField("ref", ref).Debug("Error while attempting push with http-fallback")
				}
			}
		}
		if err != nil {
			return errors.Wrap(err, "error pushing plugin")
		}
	}

	// For blobs that already exist in the registry we need to make sure to update the progress, otherwise it will just say "pending"
	// TODO: How to check if the layer already exists? Is it worth it?
	for _, j := range pj.jobs {
		progress.Update(out, pj.names[j], "Upload complete")
	}

	// Signal the client for content trust verification
	progress.Aux(out, types.PushResult{Tag: ref.(reference.Tagged).Tag(), Digest: desc.Digest.String(), Size: int(desc.Size)})

	return nil
}
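// The PushResult emitted through progress.Aux above carries the pushed tag, the
// manifest digest, and its size; the client side uses this record to report the
// final digest and, when content trust is enabled, as the payload to sign.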
// manifest wraps an OCI manifest, because...
// Historically the registry does not support plugins unless the media type on the manifest is specifically schema2.MediaTypeManifest,
// so the OCI manifest media type is not supported.
// Additionally, the docker schema2 manifest is validated to have a mediaType set on the manifest itself,
// even though the media type is already set on the descriptor; the OCI manifest type does not have this field.
type manifest struct {
	specs.Manifest
	MediaType string `json:"mediaType,omitempty"`
}

func buildManifest(ctx context.Context, s content.Manager, config digest.Digest, layers []digest.Digest) (manifest, error) {
	var m manifest
	m.MediaType = images.MediaTypeDockerSchema2Manifest
	m.SchemaVersion = 2

	configInfo, err := s.Info(ctx, config)
	if err != nil {
		return m, errors.Wrapf(err, "error reading plugin config content for digest %s", config)
	}
	m.Config = specs.Descriptor{
		MediaType: mediaTypePluginConfig,
		Size:      configInfo.Size,
		Digest:    configInfo.Digest,
	}

	for _, l := range layers {
		info, err := s.Info(ctx, l)
		if err != nil {
			return m, errors.Wrapf(err, "error fetching info for content digest %s", l)
		}
		m.Layers = append(m.Layers, specs.Descriptor{
			MediaType: images.MediaTypeDockerSchema2LayerGzip, // TODO: This is assuming everything is a gzip compressed layer, but that may not be true.
			Digest:    l,
			Size:      info.Size,
		})
	}
	return m, nil
}
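// A manifest produced by buildManifest serializes to JSON roughly like the
// following (digests and sizes are illustrative):
//
//	{
//		"schemaVersion": 2,
//		"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
//		"config": {
//			"mediaType": "application/vnd.docker.plugin.v1+json",
//			"digest": "sha256:...",
//			"size": 1234
//		},
//		"layers": [{
//			"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
//			"digest": "sha256:...",
//			"size": 56789
//		}]
//	}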
// getManifestDescriptor gets the OCI descriptor for a manifest
// It will generate a manifest if one does not exist
func (pm *Manager) getManifestDescriptor(ctx context.Context, p *v2.Plugin) (specs.Descriptor, error) {
	logger := logrus.WithField("plugin", p.Name()).WithField("digest", p.Manifest)
	if p.Manifest != "" {
		info, err := pm.blobStore.Info(ctx, p.Manifest)
		if err == nil {
			desc := specs.Descriptor{
				Size:      info.Size,
				Digest:    info.Digest,
				MediaType: images.MediaTypeDockerSchema2Manifest,
			}
			return desc, nil
		}
		logger.WithError(err).Debug("Could not find plugin manifest in content store")
	} else {
		logger.Info("Plugin does not have manifest digest")
	}
	logger.Info("Building a new plugin manifest")

	manifest, err := buildManifest(ctx, pm.blobStore, p.Config, p.Blobsums)
	if err != nil {
		return specs.Descriptor{}, err
	}

	desc, err := writeManifest(ctx, pm.blobStore, &manifest)
	if err != nil {
		return desc, err
	}

	if err := pm.save(p); err != nil {
		logger.WithError(err).Error("Could not save plugin with manifest digest")
	}
	return desc, nil
}

func writeManifest(ctx context.Context, cs content.Store, m *manifest) (specs.Descriptor, error) {
	platform := platforms.DefaultSpec()
	desc := specs.Descriptor{
		MediaType: images.MediaTypeDockerSchema2Manifest,
		Platform:  &platform,
	}
	data, err := json.Marshal(m)
	if err != nil {
		return desc, errors.Wrap(err, "error encoding manifest")
	}
	desc.Digest = digest.FromBytes(data)
	desc.Size = int64(len(data))

	if err := content.WriteBlob(ctx, cs, remotes.MakeRefKey(ctx, desc), bytes.NewReader(data), desc); err != nil {
		return desc, errors.Wrap(err, "error writing plugin manifest")
	}
	return desc, nil
}

// Remove deletes the plugin's root directory.
func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error {
	p, err := pm.config.Store.GetV2Plugin(name)
	pm.mu.RLock()
	c := pm.cMap[p]
	pm.mu.RUnlock()

	if err != nil {
		return err
	}

	if !config.ForceRemove {
		if p.GetRefCount() > 0 {
			return inUseError(p.Name())
		}
		if p.IsEnabled() {
			return enabledError(p.Name())
		}
	}

	if p.IsEnabled() {
		if err := pm.disable(p, c); err != nil {
			logrus.Errorf("failed to disable plugin '%s': %s", p.Name(), err)
		}
	}

	defer func() {
		go pm.GC()
	}()

	id := p.GetID()
	pluginDir := filepath.Join(pm.config.Root, id)

	if err := mount.RecursiveUnmount(pluginDir); err != nil {
		return errors.Wrap(err, "error unmounting plugin data")
	}

	if err := atomicRemoveAll(pluginDir); err != nil {
		return err
	}

	pm.config.Store.Remove(p)
	pm.config.LogPluginEvent(id, name, "remove")
	pm.publisher.Publish(EventRemove{Plugin: p.PluginObj})
	return nil
}

// Set sets plugin args
func (pm *Manager) Set(name string, args []string) error {
	p, err := pm.config.Store.GetV2Plugin(name)
	if err != nil {
		return err
	}
	if err := p.Set(args); err != nil {
		return err
	}
	return pm.save(p)
}
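// Set backs `docker plugin set`, which adjusts settable fields (env vars, mounts,
// devices, args) and is typically run while the plugin is disabled, for example:
//
//	docker plugin set sample-plugin:latest DEBUG=1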
// CreateFromContext creates a plugin from the given build context, which contains
// both the rootfs and the config.json, and a repoName with an optional tag.
func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) (err error) {
	pm.muGC.RLock()
	defer pm.muGC.RUnlock()

	ref, err := reference.ParseNormalizedNamed(options.RepoName)
	if err != nil {
		return errors.Wrapf(err, "failed to parse reference %v", options.RepoName)
	}
	if _, ok := ref.(reference.Canonical); ok {
		return errors.Errorf("canonical references are not permitted")
	}
	name := reference.FamiliarString(reference.TagNameOnly(ref))

	if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin()
		return err
	}

	tmpRootFSDir, err := os.MkdirTemp(pm.tmpDir(), ".rootfs")
	if err != nil {
		return errors.Wrap(err, "failed to create temp directory")
	}
	defer os.RemoveAll(tmpRootFSDir)

	var configJSON []byte
	rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON)

	rootFSBlob, err := pm.blobStore.Writer(ctx, content.WithRef(name))
	if err != nil {
		return err
	}
	defer rootFSBlob.Close()

	gzw := gzip.NewWriter(rootFSBlob)
	rootFSReader := io.TeeReader(rootFS, gzw)

	if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil {
		return err
	}
	if err := rootFS.Close(); err != nil {
		return err
	}

	if configJSON == nil {
		return errors.New("config not found")
	}

	if err := gzw.Close(); err != nil {
		return errors.Wrap(err, "error closing gzip writer")
	}

	var config types.PluginConfig
	if err := json.Unmarshal(configJSON, &config); err != nil {
		return errors.Wrap(err, "failed to parse config")
	}

	if err := pm.validateConfig(config); err != nil {
		return err
	}

	pm.mu.Lock()
	defer pm.mu.Unlock()

	if err := rootFSBlob.Commit(ctx, 0, ""); err != nil {
		return err
	}
	defer func() {
		if err != nil {
			go pm.GC()
		}
	}()

	config.Rootfs = &types.PluginConfigRootfs{
		Type:    "layers",
		DiffIds: []string{rootFSBlob.Digest().String()},
	}

	config.DockerVersion = dockerversion.Version

	configBlob, err := pm.blobStore.Writer(ctx, content.WithRef(name+"-config.json"))
	if err != nil {
		return err
	}
	defer configBlob.Close()
	if err := json.NewEncoder(configBlob).Encode(config); err != nil {
		return errors.Wrap(err, "error encoding json config")
	}
	if err := configBlob.Commit(ctx, 0, ""); err != nil {
		return err
	}

	configDigest := configBlob.Digest()
	layers := []digest.Digest{rootFSBlob.Digest()}

	manifest, err := buildManifest(ctx, pm.blobStore, configDigest, layers)
	if err != nil {
		return err
	}
	desc, err := writeManifest(ctx, pm.blobStore, &manifest)
	if err != nil {
		return
	}

	p, err := pm.createPlugin(name, configDigest, desc.Digest, layers, tmpRootFSDir, nil)
	if err != nil {
		return err
	}
	p.PluginObj.PluginReference = name

	pm.publisher.Publish(EventCreate{Plugin: p.PluginObj})
	pm.config.LogPluginEvent(p.PluginObj.ID, name, "create")

	return nil
}

func (pm *Manager) validateConfig(config types.PluginConfig) error {
	return nil // TODO:
}
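// The build context consumed by CreateFromContext follows the layout used by
// `docker plugin create`: a config.json next to a rootfs directory (paths below
// are only an example):
//
//	sample-plugin/
//	├── config.json
//	└── rootfs/
//
//	docker plugin create sample-plugin:latest ./sample-plugin
//
// splitConfigRootFSFromTar below separates the two: the config.json bytes are
// captured into the config pointer, and entries under rootfs/ are re-written to a
// new tar stream with the rootfs/ prefix stripped.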
func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		tarReader := tar.NewReader(in)
		tarWriter := tar.NewWriter(pw)
		defer in.Close()

		hasRootFS := false

		for {
			hdr, err := tarReader.Next()
			if err == io.EOF {
				if !hasRootFS {
					pw.CloseWithError(errors.Wrap(err, "no rootfs found"))
					return
				}
				// Signals end of archive.
				tarWriter.Close()
				pw.Close()
				return
			}
			if err != nil {
				pw.CloseWithError(errors.Wrap(err, "failed to read from tar"))
				return
			}

			content := io.Reader(tarReader)
			name := path.Clean(hdr.Name)
			if path.IsAbs(name) {
				name = name[1:]
			}
			if name == configFileName {
				dt, err := io.ReadAll(content)
				if err != nil {
					pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName))
					return
				}
				*config = dt
			}
			if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName {
				hdr.Name = path.Clean(path.Join(parts[1:]...))
				if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") {
					hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:]
				}
				if err := tarWriter.WriteHeader(hdr); err != nil {
					pw.CloseWithError(errors.Wrap(err, "error writing tar header"))
					return
				}
				if _, err := pools.Copy(tarWriter, content); err != nil {
					pw.CloseWithError(errors.Wrap(err, "error copying tar data"))
					return
				}
				hasRootFS = true
			} else {
				io.Copy(io.Discard, content)
			}
		}
	}()
	return pr
}

// atomicRemoveAll renames dir out of the way before deleting it; if the delete
// fails, the rename is reverted, so the original path never holds a partially
// removed tree.
func atomicRemoveAll(dir string) error {
	renamed := dir + "-removing"

	err := os.Rename(dir, renamed)
	switch {
	case os.IsNotExist(err), err == nil:
		// even if `dir` doesn't exist, we can still try and remove `renamed`
	case os.IsExist(err):
		// Some previous remove failed, check if the origin dir exists
		if e := system.EnsureRemoveAll(renamed); e != nil {
			return errors.Wrap(err, "rename target already exists and could not be removed")
		}
		if _, err := os.Stat(dir); os.IsNotExist(err) {
			// origin doesn't exist, nothing left to do
			return nil
		}

		// attempt to rename again
		if err := os.Rename(dir, renamed); err != nil {
			return errors.Wrap(err, "failed to rename dir for atomic removal")
		}
	default:
		return errors.Wrap(err, "failed to rename dir for atomic removal")
	}

	if err := system.EnsureRemoveAll(renamed); err != nil {
		os.Rename(renamed, dir)
		return err
	}
	return nil
}