/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package local

import (
	"context"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"time"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/log"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

// writer represents a write transaction against the blob store.
type writer struct {
	s         *store
	fp        *os.File // opened data file
	path      string   // path to writer dir
	ref       string   // ref key
	offset    int64    // bytes written so far in this transaction
	total     int64    // expected total size; zero when unknown
	digester  digest.Digester // running digest of everything written
	startedAt time.Time       // when the ingest began
	updatedAt time.Time       // last time Write advanced the transaction
}

// Status reports the current state of the transaction: ref key, bytes
// written, expected total, and start/last-update timestamps.
func (w *writer) Status() (content.Status, error) {
	return content.Status{
		Ref:       w.ref,
		Offset:    w.offset,
		Total:     w.total,
		StartedAt: w.startedAt,
		UpdatedAt: w.updatedAt,
	}, nil
}

// Digest returns the current digest of the content, up to the current write.
//
// Cannot be called concurrently with `Write`.
func (w *writer) Digest() digest.Digest {
	return w.digester.Digest()
}

// Write p to the transaction.
//
// Note that writes are unbuffered to the backing file. When writing, it is
// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer.
68 func (w *writer) Write(p []byte) (n int, err error) { 69 n, err = w.fp.Write(p) 70 w.digester.Hash().Write(p[:n]) 71 w.offset += int64(len(p)) 72 w.updatedAt = time.Now() 73 return n, err 74 } 75 76 func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { 77 // Ensure even on error the writer is fully closed 78 defer unlock(w.ref) 79 80 var base content.Info 81 for _, opt := range opts { 82 if err := opt(&base); err != nil { 83 return err 84 } 85 } 86 87 fp := w.fp 88 w.fp = nil 89 90 if fp == nil { 91 return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer") 92 } 93 94 if err := fp.Sync(); err != nil { 95 fp.Close() 96 return errors.Wrap(err, "sync failed") 97 } 98 99 fi, err := fp.Stat() 100 closeErr := fp.Close() 101 if err != nil { 102 return errors.Wrap(err, "stat on ingest file failed") 103 } 104 if closeErr != nil { 105 return errors.Wrap(err, "failed to close ingest file") 106 } 107 108 if size > 0 && size != fi.Size() { 109 return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fi.Size(), size) 110 } 111 112 dgst := w.digester.Digest() 113 if expected != "" && expected != dgst { 114 return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected) 115 } 116 117 var ( 118 ingest = filepath.Join(w.path, "data") 119 target, _ = w.s.blobPath(dgst) // ignore error because we calculated this dgst 120 ) 121 122 // make sure parent directories of blob exist 123 if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil { 124 return err 125 } 126 127 if _, err := os.Stat(target); err == nil { 128 // collision with the target file! 
129 if err := os.RemoveAll(w.path); err != nil { 130 log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory") 131 } 132 return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", dgst) 133 } 134 135 if err := os.Rename(ingest, target); err != nil { 136 return err 137 } 138 139 // Ingest has now been made available in the content store, attempt to complete 140 // setting metadata but errors should only be logged and not returned since 141 // the content store cannot be cleanly rolled back. 142 143 commitTime := time.Now() 144 if err := os.Chtimes(target, commitTime, commitTime); err != nil { 145 log.G(ctx).WithField("digest", dgst).Errorf("failed to change file time to commit time") 146 } 147 148 // clean up!! 149 if err := os.RemoveAll(w.path); err != nil { 150 log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory") 151 } 152 153 if w.s.ls != nil && base.Labels != nil { 154 if err := w.s.ls.Set(dgst, base.Labels); err != nil { 155 log.G(ctx).WithField("digest", dgst).Errorf("failed to set labels") 156 } 157 } 158 159 // change to readonly, more important for read, but provides _some_ 160 // protection from this point on. We use the existing perms with a mask 161 // only allowing reads honoring the umask on creation. 162 // 163 // This removes write and exec, only allowing read per the creation umask. 164 // 165 // NOTE: Windows does not support this operation 166 if runtime.GOOS != "windows" { 167 if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil { 168 log.G(ctx).WithField("ref", w.ref).Errorf("failed to make readonly") 169 } 170 } 171 172 return nil 173 } 174 175 // Close the writer, flushing any unwritten data and leaving the progress in 176 // tact. 177 // 178 // If one needs to resume the transaction, a new writer can be obtained from 179 // `Ingester.Writer` using the same key. The write can then be continued 180 // from it was left off. 
181 // 182 // To abandon a transaction completely, first call close then `IngestManager.Abort` to 183 // clean up the associated resources. 184 func (w *writer) Close() (err error) { 185 if w.fp != nil { 186 w.fp.Sync() 187 err = w.fp.Close() 188 writeTimestampFile(filepath.Join(w.path, "updatedat"), w.updatedAt) 189 w.fp = nil 190 unlock(w.ref) 191 return 192 } 193 194 return nil 195 } 196 197 func (w *writer) Truncate(size int64) error { 198 if size != 0 { 199 return errors.New("Truncate: unsupported size") 200 } 201 w.offset = 0 202 w.digester.Hash().Reset() 203 if _, err := w.fp.Seek(0, io.SeekStart); err != nil { 204 return err 205 } 206 return w.fp.Truncate(0) 207 }