github.com/mika/distribution@v2.2.2-0.20160108133430-a75790e3d8e0+incompatible/registry/storage/blobwriter_resumable.go

// +build !noresumabledigest

package storage

import (
	"fmt"
	"io"
	"os"
	"path"
	"strconv"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/stevvooe/resumable"

	// register resumable hashes with import
	_ "github.com/stevvooe/resumable/sha256"
	_ "github.com/stevvooe/resumable/sha512"
)

// resumeDigestAt attempts to restore the state of the internal hash function
// by loading the most recent saved hash state at an offset less than or equal
// to the given offset. Any bytes below the given offset that have not yet
// been hashed are then hashed from the content uploaded so far.
func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error {
	if !bw.resumableDigestEnabled {
		return errResumableDigestNotAvailable
	}

	if offset < 0 {
		return fmt.Errorf("cannot resume hash at negative offset: %d", offset)
	}

	h, ok := bw.digester.Hash().(resumable.Hash)
	if !ok {
		return errResumableDigestNotAvailable
	}

	if offset == int64(h.Len()) {
		// State of digester is already at the requested offset.
		return nil
	}

	// List hash states from storage backend.
	var hashStateMatch hashStateEntry
	hashStates, err := bw.getStoredHashStates(ctx)
	if err != nil {
		return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err)
	}

	// Find the highest stored hashState with offset less than or equal to
	// the requested offset.
	for _, hashState := range hashStates {
		if hashState.offset == offset {
			hashStateMatch = hashState
			break // Found an exact offset match.
		} else if hashState.offset < offset && hashState.offset > hashStateMatch.offset {
			// This offset is closer to the requested offset.
			hashStateMatch = hashState
		} else if hashState.offset > offset {
			// Remove any stored hash state with offsets higher than this one
			// as writes to this resumed hasher will make those invalid. This
			// is probably okay to skip for now since we don't expect anyone to
			// use the API in this way. For that reason, we don't treat an
			// error here as fatal, but only log it.
			if err := bw.driver.Delete(ctx, hashState.path); err != nil {
				logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err)
			}
		}
	}

	if hashStateMatch.offset == 0 {
		// No need to load any state, just reset the hasher.
		h.Reset()
	} else {
		storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path)
		if err != nil {
			return err
		}

		if err = h.Restore(storedState); err != nil {
			return err
		}
	}

	// Mind the gap.
	if gapLen := offset - int64(h.Len()); gapLen > 0 {
		// Need to read content from the upload to catch up to the desired offset.
		fr, err := newFileReader(ctx, bw.driver, bw.path, bw.size)
		if err != nil {
			return err
		}
		defer fr.Close()

		if _, err = fr.Seek(int64(h.Len()), os.SEEK_SET); err != nil {
			return fmt.Errorf("unable to seek to layer reader offset %d: %s", h.Len(), err)
		}

		if _, err := io.CopyN(h, fr, gapLen); err != nil {
			return err
		}
	}

	return nil
}

type hashStateEntry struct {
	offset int64
	path   string
}
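// Illustrative note (a sketch inferred from uploadHashStatePathSpec, not
// spelled out in this file): each saved hash state lives at a path whose
// final component is the byte offset at which the state was captured, along
// the lines of
//
//	.../_uploads/<id>/hashstates/sha256/0
//	.../_uploads/<id>/hashstates/sha256/4096
//
// Given that layout, a call like bw.resumeDigestAt(ctx, 6000) would restore
// the state saved at offset 4096 and then hash bytes [4096, 6000) of the
// upload file to catch the digester up to the requested offset.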
// getStoredHashStates returns a slice of hashStateEntries for this upload.
func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) {
	uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{
		name: bw.blobStore.repository.Name(),
		id:   bw.id,
		alg:  bw.digester.Digest().Algorithm(),
		list: true,
	})

	if err != nil {
		return nil, err
	}

	paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix)
	if err != nil {
		if _, ok := err.(storagedriver.PathNotFoundError); !ok {
			return nil, err
		}
		// Treat PathNotFoundError as no entries.
		paths = nil
	}

	hashStateEntries := make([]hashStateEntry, 0, len(paths))

	for _, p := range paths {
		pathSuffix := path.Base(p)
		// The suffix should be the offset.
		offset, err := strconv.ParseInt(pathSuffix, 0, 64)
		if err != nil {
			logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err)
		}

		hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p})
	}

	return hashStateEntries, nil
}

// storeHashState persists the state of the resumable hasher at its current
// offset so that a later request can resume digest computation from there.
func (bw *blobWriter) storeHashState(ctx context.Context) error {
	if !bw.resumableDigestEnabled {
		return errResumableDigestNotAvailable
	}

	h, ok := bw.digester.Hash().(resumable.Hash)
	if !ok {
		return errResumableDigestNotAvailable
	}

	uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{
		name:   bw.blobStore.repository.Name(),
		id:     bw.id,
		alg:    bw.digester.Digest().Algorithm(),
		offset: int64(h.Len()),
	})

	if err != nil {
		return err
	}

	hashState, err := h.State()
	if err != nil {
		return err
	}

	return bw.driver.PutContent(ctx, uploadHashStatePath, hashState)
}
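// Usage sketch (hypothetical helper, not part of this file; the real call
// sites live elsewhere in this package): a chunked-upload step that restores
// the hasher to the client's offset, appends the chunk, and checkpoints the
// digest state so a later request, possibly served by another registry
// instance, can resume without rehashing the whole upload.
//
//	func appendChunk(ctx context.Context, bw *blobWriter, offset int64, chunk []byte) error {
//		// Rewind or fast-forward the digester to the resume offset.
//		if err := bw.resumeDigestAt(ctx, offset); err != nil {
//			return err
//		}
//		if _, err := bw.Write(chunk); err != nil {
//			return err
//		}
//		// Persist the hasher state at the new offset for the next request.
//		return bw.storeHashState(ctx)
//	}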