github.com/containerd/containerd@v1.4.13/content/local/store.go (about)

     1  /*
     2     Copyright The containerd Authors.
     3  
     4     Licensed under the Apache License, Version 2.0 (the "License");
     5     you may not use this file except in compliance with the License.
     6     You may obtain a copy of the License at
     7  
     8         http://www.apache.org/licenses/LICENSE-2.0
     9  
    10     Unless required by applicable law or agreed to in writing, software
    11     distributed under the License is distributed on an "AS IS" BASIS,
    12     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13     See the License for the specific language governing permissions and
    14     limitations under the License.
    15  */
    16  
    17  package local
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"io"
    23  	"io/ioutil"
    24  	"math/rand"
    25  	"os"
    26  	"path/filepath"
    27  	"strconv"
    28  	"strings"
    29  	"sync"
    30  	"time"
    31  
    32  	"github.com/containerd/containerd/content"
    33  	"github.com/containerd/containerd/errdefs"
    34  	"github.com/containerd/containerd/filters"
    35  	"github.com/containerd/containerd/log"
    36  	"github.com/sirupsen/logrus"
    37  
    38  	digest "github.com/opencontainers/go-digest"
    39  	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    40  	"github.com/pkg/errors"
    41  )
    42  
    43  var bufPool = sync.Pool{
    44  	New: func() interface{} {
    45  		buffer := make([]byte, 1<<20)
    46  		return &buffer
    47  	},
    48  }
    49  
// LabelStore is used to store mutable labels for digests, keeping label
// state separate from the immutable blob data itself.
type LabelStore interface {
	// Get returns all the labels for the given digest
	Get(digest.Digest) (map[string]string, error)

	// Set sets all the labels for a given digest, replacing any
	// previously stored set.
	Set(digest.Digest, map[string]string) error

	// Update replaces the given labels for a digest,
	// a key with an empty value removes a label. The full resulting
	// label set is returned.
	Update(digest.Digest, map[string]string) (map[string]string, error)
}
    62  
// Store is digest-keyed store for content. All data written into the store is
// stored under a verifiable digest.
//
// Store can generally support multi-reader, single-writer ingest of data,
// including resumable ingest.
type store struct {
	// root is the content store root directory; blobs live under
	// root/blobs/<alg>/<hex> and in-flight writes under root/ingest.
	root string
	// ls optionally stores mutable labels per digest; when nil, label
	// reads return nothing and Update is unsupported.
	ls LabelStore
}
    72  
    73  // NewStore returns a local content store
    74  func NewStore(root string) (content.Store, error) {
    75  	return NewLabeledStore(root, nil)
    76  }
    77  
    78  // NewLabeledStore returns a new content store using the provided label store
    79  //
    80  // Note: content stores which are used underneath a metadata store may not
    81  // require labels and should use `NewStore`. `NewLabeledStore` is primarily
    82  // useful for tests or standalone implementations.
    83  func NewLabeledStore(root string, ls LabelStore) (content.Store, error) {
    84  	if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil {
    85  		return nil, err
    86  	}
    87  
    88  	return &store{
    89  		root: root,
    90  		ls:   ls,
    91  	}, nil
    92  }
    93  
    94  func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
    95  	p, err := s.blobPath(dgst)
    96  	if err != nil {
    97  		return content.Info{}, errors.Wrapf(err, "calculating blob info path")
    98  	}
    99  
   100  	fi, err := os.Stat(p)
   101  	if err != nil {
   102  		if os.IsNotExist(err) {
   103  			err = errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst)
   104  		}
   105  
   106  		return content.Info{}, err
   107  	}
   108  	var labels map[string]string
   109  	if s.ls != nil {
   110  		labels, err = s.ls.Get(dgst)
   111  		if err != nil {
   112  			return content.Info{}, err
   113  		}
   114  	}
   115  	return s.info(dgst, fi, labels), nil
   116  }
   117  
   118  func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]string) content.Info {
   119  	return content.Info{
   120  		Digest:    dgst,
   121  		Size:      fi.Size(),
   122  		CreatedAt: fi.ModTime(),
   123  		UpdatedAt: getATime(fi),
   124  		Labels:    labels,
   125  	}
   126  }
   127  
   128  // ReaderAt returns an io.ReaderAt for the blob.
   129  func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
   130  	p, err := s.blobPath(desc.Digest)
   131  	if err != nil {
   132  		return nil, errors.Wrapf(err, "calculating blob path for ReaderAt")
   133  	}
   134  	fi, err := os.Stat(p)
   135  	if err != nil {
   136  		if !os.IsNotExist(err) {
   137  			return nil, err
   138  		}
   139  
   140  		return nil, errors.Wrapf(errdefs.ErrNotFound, "blob %s expected at %s", desc.Digest, p)
   141  	}
   142  
   143  	fp, err := os.Open(p)
   144  	if err != nil {
   145  		if !os.IsNotExist(err) {
   146  			return nil, err
   147  		}
   148  
   149  		return nil, errors.Wrapf(errdefs.ErrNotFound, "blob %s expected at %s", desc.Digest, p)
   150  	}
   151  
   152  	return sizeReaderAt{size: fi.Size(), fp: fp}, nil
   153  }
   154  
   155  // Delete removes a blob by its digest.
   156  //
   157  // While this is safe to do concurrently, safe exist-removal logic must hold
   158  // some global lock on the store.
   159  func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
   160  	bp, err := s.blobPath(dgst)
   161  	if err != nil {
   162  		return errors.Wrapf(err, "calculating blob path for delete")
   163  	}
   164  
   165  	if err := os.RemoveAll(bp); err != nil {
   166  		if !os.IsNotExist(err) {
   167  			return err
   168  		}
   169  
   170  		return errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst)
   171  	}
   172  
   173  	return nil
   174  }
   175  
   176  func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
   177  	if s.ls == nil {
   178  		return content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store")
   179  	}
   180  
   181  	p, err := s.blobPath(info.Digest)
   182  	if err != nil {
   183  		return content.Info{}, errors.Wrapf(err, "calculating blob path for update")
   184  	}
   185  
   186  	fi, err := os.Stat(p)
   187  	if err != nil {
   188  		if os.IsNotExist(err) {
   189  			err = errors.Wrapf(errdefs.ErrNotFound, "content %v", info.Digest)
   190  		}
   191  
   192  		return content.Info{}, err
   193  	}
   194  
   195  	var (
   196  		all    bool
   197  		labels map[string]string
   198  	)
   199  	if len(fieldpaths) > 0 {
   200  		for _, path := range fieldpaths {
   201  			if strings.HasPrefix(path, "labels.") {
   202  				if labels == nil {
   203  					labels = map[string]string{}
   204  				}
   205  
   206  				key := strings.TrimPrefix(path, "labels.")
   207  				labels[key] = info.Labels[key]
   208  				continue
   209  			}
   210  
   211  			switch path {
   212  			case "labels":
   213  				all = true
   214  				labels = info.Labels
   215  			default:
   216  				return content.Info{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest)
   217  			}
   218  		}
   219  	} else {
   220  		all = true
   221  		labels = info.Labels
   222  	}
   223  
   224  	if all {
   225  		err = s.ls.Set(info.Digest, labels)
   226  	} else {
   227  		labels, err = s.ls.Update(info.Digest, labels)
   228  	}
   229  	if err != nil {
   230  		return content.Info{}, err
   231  	}
   232  
   233  	info = s.info(info.Digest, fi, labels)
   234  	info.UpdatedAt = time.Now()
   235  
   236  	if err := os.Chtimes(p, info.UpdatedAt, info.CreatedAt); err != nil {
   237  		log.G(ctx).WithError(err).Warnf("could not change access time for %s", info.Digest)
   238  	}
   239  
   240  	return info, nil
   241  }
   242  
// Walk visits every committed blob under <root>/blobs and invokes fn with its
// content.Info. The on-disk layout is blobs/<algorithm>/<hex>, so the walk
// keeps track of the algorithm directory currently being descended via alg.
func (s *store) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
	// TODO: Support filters
	root := filepath.Join(s.root, "blobs")
	var alg digest.Algorithm
	return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Ignore files seen before a valid algorithm directory has been
		// entered (alg is only set while inside blobs/<alg>/).
		if !fi.IsDir() && !alg.Available() {
			return nil
		}

		// TODO(stevvooe): There are few more cases with subdirs that should be
		// handled in case the layout gets corrupted. This isn't strict enough
		// and may spew bad data.

		if path == root {
			return nil
		}
		if filepath.Dir(path) == root {
			// First level below root names the digest algorithm.
			alg = digest.Algorithm(filepath.Base(path))

			if !alg.Available() {
				// Unrecognized algorithm: skip this whole subtree.
				alg = ""
				return filepath.SkipDir
			}

			// descending into a hash directory
			return nil
		}

		// Second level entries are the hex-encoded digests themselves.
		dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path))
		if err := dgst.Validate(); err != nil {
			// log error but don't report
			log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path")
			// if we see this, it could mean some sort of corruption of the
			// store or extra paths not expected previously.
		}

		var labels map[string]string
		if s.ls != nil {
			labels, err = s.ls.Get(dgst)
			if err != nil {
				return err
			}
		}
		return fn(s.info(dgst, fi, labels))
	})
}
   292  
   293  func (s *store) Status(ctx context.Context, ref string) (content.Status, error) {
   294  	return s.status(s.ingestRoot(ref))
   295  }
   296  
   297  func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) {
   298  	fp, err := os.Open(filepath.Join(s.root, "ingest"))
   299  	if err != nil {
   300  		return nil, err
   301  	}
   302  
   303  	defer fp.Close()
   304  
   305  	fis, err := fp.Readdir(-1)
   306  	if err != nil {
   307  		return nil, err
   308  	}
   309  
   310  	filter, err := filters.ParseAll(fs...)
   311  	if err != nil {
   312  		return nil, err
   313  	}
   314  
   315  	var active []content.Status
   316  	for _, fi := range fis {
   317  		p := filepath.Join(s.root, "ingest", fi.Name())
   318  		stat, err := s.status(p)
   319  		if err != nil {
   320  			if !os.IsNotExist(err) {
   321  				return nil, err
   322  			}
   323  
   324  			// TODO(stevvooe): This is a common error if uploads are being
   325  			// completed while making this listing. Need to consider taking a
   326  			// lock on the whole store to coordinate this aspect.
   327  			//
   328  			// Another option is to cleanup downloads asynchronously and
   329  			// coordinate this method with the cleanup process.
   330  			//
   331  			// For now, we just skip them, as they really don't exist.
   332  			continue
   333  		}
   334  
   335  		if filter.Match(adaptStatus(stat)) {
   336  			active = append(active, stat)
   337  		}
   338  	}
   339  
   340  	return active, nil
   341  }
   342  
   343  // WalkStatusRefs is used to walk all status references
   344  // Failed status reads will be logged and ignored, if
   345  // this function is called while references are being altered,
   346  // these error messages may be produced.
   347  func (s *store) WalkStatusRefs(ctx context.Context, fn func(string) error) error {
   348  	fp, err := os.Open(filepath.Join(s.root, "ingest"))
   349  	if err != nil {
   350  		return err
   351  	}
   352  
   353  	defer fp.Close()
   354  
   355  	fis, err := fp.Readdir(-1)
   356  	if err != nil {
   357  		return err
   358  	}
   359  
   360  	for _, fi := range fis {
   361  		rf := filepath.Join(s.root, "ingest", fi.Name(), "ref")
   362  
   363  		ref, err := readFileString(rf)
   364  		if err != nil {
   365  			log.G(ctx).WithError(err).WithField("path", rf).Error("failed to read ingest ref")
   366  			continue
   367  		}
   368  
   369  		if err := fn(ref); err != nil {
   370  			return err
   371  		}
   372  	}
   373  
   374  	return nil
   375  }
   376  
   377  // status works like stat above except uses the path to the ingest.
   378  func (s *store) status(ingestPath string) (content.Status, error) {
   379  	dp := filepath.Join(ingestPath, "data")
   380  	fi, err := os.Stat(dp)
   381  	if err != nil {
   382  		if os.IsNotExist(err) {
   383  			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
   384  		}
   385  		return content.Status{}, err
   386  	}
   387  
   388  	ref, err := readFileString(filepath.Join(ingestPath, "ref"))
   389  	if err != nil {
   390  		if os.IsNotExist(err) {
   391  			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
   392  		}
   393  		return content.Status{}, err
   394  	}
   395  
   396  	startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat"))
   397  	if err != nil {
   398  		return content.Status{}, errors.Wrapf(err, "could not read startedat")
   399  	}
   400  
   401  	updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat"))
   402  	if err != nil {
   403  		return content.Status{}, errors.Wrapf(err, "could not read updatedat")
   404  	}
   405  
   406  	// because we don't write updatedat on every write, the mod time may
   407  	// actually be more up to date.
   408  	if fi.ModTime().After(updatedAt) {
   409  		updatedAt = fi.ModTime()
   410  	}
   411  
   412  	return content.Status{
   413  		Ref:       ref,
   414  		Offset:    fi.Size(),
   415  		Total:     s.total(ingestPath),
   416  		UpdatedAt: updatedAt,
   417  		StartedAt: startedAt,
   418  	}, nil
   419  }
   420  
   421  func adaptStatus(status content.Status) filters.Adaptor {
   422  	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
   423  		if len(fieldpath) == 0 {
   424  			return "", false
   425  		}
   426  		switch fieldpath[0] {
   427  		case "ref":
   428  			return status.Ref, true
   429  		}
   430  
   431  		return "", false
   432  	})
   433  }
   434  
   435  // total attempts to resolve the total expected size for the write.
   436  func (s *store) total(ingestPath string) int64 {
   437  	totalS, err := readFileString(filepath.Join(ingestPath, "total"))
   438  	if err != nil {
   439  		return 0
   440  	}
   441  
   442  	total, err := strconv.ParseInt(totalS, 10, 64)
   443  	if err != nil {
   444  		// represents a corrupted file, should probably remove.
   445  		return 0
   446  	}
   447  
   448  	return total
   449  }
   450  
   451  // Writer begins or resumes the active writer identified by ref. If the writer
   452  // is already in use, an error is returned. Only one writer may be in use per
   453  // ref at a time.
   454  //
   455  // The argument `ref` is used to uniquely identify a long-lived writer transaction.
   456  func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
   457  	var wOpts content.WriterOpts
   458  	for _, opt := range opts {
   459  		if err := opt(&wOpts); err != nil {
   460  			return nil, err
   461  		}
   462  	}
   463  	// TODO(AkihiroSuda): we could create a random string or one calculated based on the context
   464  	// https://github.com/containerd/containerd/issues/2129#issuecomment-380255019
   465  	if wOpts.Ref == "" {
   466  		return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty")
   467  	}
   468  	var lockErr error
   469  	for count := uint64(0); count < 10; count++ {
   470  		time.Sleep(time.Millisecond * time.Duration(rand.Intn(1<<count)))
   471  		if err := tryLock(wOpts.Ref); err != nil {
   472  			if !errdefs.IsUnavailable(err) {
   473  				return nil, err
   474  			}
   475  
   476  			lockErr = err
   477  		} else {
   478  			lockErr = nil
   479  			break
   480  		}
   481  	}
   482  
   483  	if lockErr != nil {
   484  		return nil, lockErr
   485  	}
   486  
   487  	w, err := s.writer(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest)
   488  	if err != nil {
   489  		unlock(wOpts.Ref)
   490  		return nil, err
   491  	}
   492  
   493  	return w, nil // lock is now held by w.
   494  }
   495  
   496  func (s *store) resumeStatus(ref string, total int64, digester digest.Digester) (content.Status, error) {
   497  	path, _, data := s.ingestPaths(ref)
   498  	status, err := s.status(path)
   499  	if err != nil {
   500  		return status, errors.Wrap(err, "failed reading status of resume write")
   501  	}
   502  	if ref != status.Ref {
   503  		// NOTE(stevvooe): This is fairly catastrophic. Either we have some
   504  		// layout corruption or a hash collision for the ref key.
   505  		return status, errors.Errorf("ref key does not match: %v != %v", ref, status.Ref)
   506  	}
   507  
   508  	if total > 0 && status.Total > 0 && total != status.Total {
   509  		return status, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
   510  	}
   511  
   512  	// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
   513  	fp, err := os.Open(data)
   514  	if err != nil {
   515  		return status, err
   516  	}
   517  
   518  	p := bufPool.Get().(*[]byte)
   519  	status.Offset, err = io.CopyBuffer(digester.Hash(), fp, *p)
   520  	bufPool.Put(p)
   521  	fp.Close()
   522  	return status, err
   523  }
   524  
   525  // writer provides the main implementation of the Writer method. The caller
   526  // must hold the lock correctly and release on error if there is a problem.
   527  func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) {
   528  	// TODO(stevvooe): Need to actually store expected here. We have
   529  	// code in the service that shouldn't be dealing with this.
   530  	if expected != "" {
   531  		p, err := s.blobPath(expected)
   532  		if err != nil {
   533  			return nil, errors.Wrap(err, "calculating expected blob path for writer")
   534  		}
   535  		if _, err := os.Stat(p); err == nil {
   536  			return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected)
   537  		}
   538  	}
   539  
   540  	path, refp, data := s.ingestPaths(ref)
   541  
   542  	var (
   543  		digester  = digest.Canonical.Digester()
   544  		offset    int64
   545  		startedAt time.Time
   546  		updatedAt time.Time
   547  	)
   548  
   549  	foundValidIngest := false
   550  	// ensure that the ingest path has been created.
   551  	if err := os.Mkdir(path, 0755); err != nil {
   552  		if !os.IsExist(err) {
   553  			return nil, err
   554  		}
   555  		status, err := s.resumeStatus(ref, total, digester)
   556  		if err == nil {
   557  			foundValidIngest = true
   558  			updatedAt = status.UpdatedAt
   559  			startedAt = status.StartedAt
   560  			total = status.Total
   561  			offset = status.Offset
   562  		} else {
   563  			logrus.Infof("failed to resume the status from path %s: %s. will recreate them", path, err.Error())
   564  		}
   565  	}
   566  
   567  	if !foundValidIngest {
   568  		startedAt = time.Now()
   569  		updatedAt = startedAt
   570  
   571  		// the ingest is new, we need to setup the target location.
   572  		// write the ref to a file for later use
   573  		if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil {
   574  			return nil, err
   575  		}
   576  
   577  		if err := writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil {
   578  			return nil, err
   579  		}
   580  
   581  		if err := writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil {
   582  			return nil, err
   583  		}
   584  
   585  		if total > 0 {
   586  			if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil {
   587  				return nil, err
   588  			}
   589  		}
   590  	}
   591  
   592  	fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666)
   593  	if err != nil {
   594  		return nil, errors.Wrap(err, "failed to open data file")
   595  	}
   596  
   597  	if _, err := fp.Seek(offset, io.SeekStart); err != nil {
   598  		return nil, errors.Wrap(err, "could not seek to current write offset")
   599  	}
   600  
   601  	return &writer{
   602  		s:         s,
   603  		fp:        fp,
   604  		ref:       ref,
   605  		path:      path,
   606  		offset:    offset,
   607  		total:     total,
   608  		digester:  digester,
   609  		startedAt: startedAt,
   610  		updatedAt: updatedAt,
   611  	}, nil
   612  }
   613  
   614  // Abort an active transaction keyed by ref. If the ingest is active, it will
   615  // be cancelled. Any resources associated with the ingest will be cleaned.
   616  func (s *store) Abort(ctx context.Context, ref string) error {
   617  	root := s.ingestRoot(ref)
   618  	if err := os.RemoveAll(root); err != nil {
   619  		if os.IsNotExist(err) {
   620  			return errors.Wrapf(errdefs.ErrNotFound, "ingest ref %q", ref)
   621  		}
   622  
   623  		return err
   624  	}
   625  
   626  	return nil
   627  }
   628  
   629  func (s *store) blobPath(dgst digest.Digest) (string, error) {
   630  	if err := dgst.Validate(); err != nil {
   631  		return "", errors.Wrapf(errdefs.ErrInvalidArgument, "cannot calculate blob path from invalid digest: %v", err)
   632  	}
   633  
   634  	return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex()), nil
   635  }
   636  
   637  func (s *store) ingestRoot(ref string) string {
   638  	// we take a digest of the ref to keep the ingest paths constant length.
   639  	// Note that this is not the current or potential digest of incoming content.
   640  	dgst := digest.FromString(ref)
   641  	return filepath.Join(s.root, "ingest", dgst.Hex())
   642  }
   643  
   644  // ingestPaths are returned. The paths are the following:
   645  //
   646  // - root: entire ingest directory
   647  // - ref: name of the starting ref, must be unique
   648  // - data: file where data is written
   649  //
   650  func (s *store) ingestPaths(ref string) (string, string, string) {
   651  	var (
   652  		fp = s.ingestRoot(ref)
   653  		rp = filepath.Join(fp, "ref")
   654  		dp = filepath.Join(fp, "data")
   655  	)
   656  
   657  	return fp, rp, dp
   658  }
   659  
   660  func readFileString(path string) (string, error) {
   661  	p, err := ioutil.ReadFile(path)
   662  	return string(p), err
   663  }
   664  
   665  // readFileTimestamp reads a file with just a timestamp present.
   666  func readFileTimestamp(p string) (time.Time, error) {
   667  	b, err := ioutil.ReadFile(p)
   668  	if err != nil {
   669  		if os.IsNotExist(err) {
   670  			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
   671  		}
   672  		return time.Time{}, err
   673  	}
   674  
   675  	var t time.Time
   676  	if err := t.UnmarshalText(b); err != nil {
   677  		return time.Time{}, errors.Wrapf(err, "could not parse timestamp file %v", p)
   678  	}
   679  
   680  	return t, nil
   681  }
   682  
   683  func writeTimestampFile(p string, t time.Time) error {
   684  	b, err := t.MarshalText()
   685  	if err != nil {
   686  		return err
   687  	}
   688  	return atomicWrite(p, b, 0666)
   689  }
   690  
   691  func atomicWrite(path string, data []byte, mode os.FileMode) error {
   692  	tmp := fmt.Sprintf("%s.tmp", path)
   693  	f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode)
   694  	if err != nil {
   695  		return errors.Wrap(err, "create tmp file")
   696  	}
   697  	_, err = f.Write(data)
   698  	f.Close()
   699  	if err != nil {
   700  		return errors.Wrap(err, "write atomic data")
   701  	}
   702  	return os.Rename(tmp, path)
   703  }