github.com/sijibomii/docker@v0.0.0-20231230191044-5cf6ca554647/layer/migration.go

package layer

import (
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"os"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/digest"
	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

// CreateRWLayerByGraphID creates a RWLayer in the layer store using
// the provided name with the given graphID. To get the RWLayer
// after migration the layer may be retrieved by the given name.
func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) {
	ls.mountL.Lock()
	defer ls.mountL.Unlock()
	m, ok := ls.mounts[name]
	if ok {
		if m.parent.chainID != parent {
			return errors.New("name conflict, mismatched parent")
		}
		if m.mountID != graphID {
			return errors.New("mount already exists")
		}

		return nil
	}

	if !ls.driver.Exists(graphID) {
		return fmt.Errorf("graph ID does not exist: %q", graphID)
	}

	var p *roLayer
	if string(parent) != "" {
		p = ls.get(parent)
		if p == nil {
			return ErrLayerDoesNotExist
		}

		// Release parent chain if error
		defer func() {
			if err != nil {
				ls.layerL.Lock()
				ls.releaseLayer(p)
				ls.layerL.Unlock()
			}
		}()
	}

	// TODO: Ensure graphID has correct parent

	m = &mountedLayer{
		name:       name,
		parent:     p,
		mountID:    graphID,
		layerStore: ls,
		references: map[RWLayer]*referencedRWLayer{},
	}

	// Check for existing init layer
	initID := fmt.Sprintf("%s-init", graphID)
	if ls.driver.Exists(initID) {
		m.initID = initID
	}

	if err = ls.saveMount(m); err != nil {
		return err
	}

	return nil
}

// ChecksumForGraphID computes the diff ID and size for the layer with the
// given graph ID. It prefers the existing tar-split metadata at
// oldTarDataPath, linking it to newTarDataPath; if that metadata is missing
// or unusable it falls back to re-reading the diff from the graph driver.
func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) {
	defer func() {
		if err != nil {
			logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err)
			diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath)
		}
	}()

	if oldTarDataPath == "" {
		err = errors.New("no tar-split file")
		return
	}

	tarDataFile, err := os.Open(oldTarDataPath)
	if err != nil {
		return
	}
	defer tarDataFile.Close()
	uncompressed, err := gzip.NewReader(tarDataFile)
	if err != nil {
		return
	}

	dgst := digest.Canonical.New()
	err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash())
	if err != nil {
		return
	}

	diffID = DiffID(dgst.Digest())
	err = os.RemoveAll(newTarDataPath)
	if err != nil {
		return
	}
	err = os.Link(oldTarDataPath, newTarDataPath)

	return
}

func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) {
	rawarchive, err := ls.driver.Diff(id, parent)
	if err != nil {
		return
	}
	defer rawarchive.Close()

	f, err := os.Create(newTarDataPath)
	if err != nil {
		return
	}
	defer f.Close()
	mfz := gzip.NewWriter(f)
	defer mfz.Close()
	metaPacker := storage.NewJSONPacker(mfz)

	packerCounter := &packSizeCounter{metaPacker, &size}

	archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil)
	if err != nil {
		return
	}
	dgst, err := digest.FromReader(archive)
	if err != nil {
		return
	}
	diffID = DiffID(dgst)
	return
}

// RegisterByGraphID registers a read-only layer for the given graph ID,
// parent chain and diff ID, copying the tar-split metadata from tarDataFile
// into the layer store's metadata transaction.
func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) {
	// err is used to hold the error which will always trigger
	// cleanup of created sources but may not be an error returned
	// to the caller (already exists).
	var err error
	var p *roLayer
	if string(parent) != "" {
		p = ls.get(parent)
		if p == nil {
			return nil, ErrLayerDoesNotExist
		}

		// Release parent chain if error
		defer func() {
			if err != nil {
				ls.layerL.Lock()
				ls.releaseLayer(p)
				ls.layerL.Unlock()
			}
		}()
	}

	// Create new roLayer
	layer := &roLayer{
		parent:         p,
		cacheID:        graphID,
		referenceCount: 1,
		layerStore:     ls,
		references:     map[Layer]struct{}{},
		diffID:         diffID,
		size:           size,
		chainID:        createChainIDFromParent(parent, diffID),
	}

	ls.layerL.Lock()
	defer ls.layerL.Unlock()

	if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil {
		// Set error for cleanup, but do not return
		err = errors.New("layer already exists")
		return existingLayer.getReference(), nil
	}

	tx, err := ls.store.StartTransaction()
	if err != nil {
		return nil, err
	}

	defer func() {
		if err != nil {
			logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err)
			if err := tx.Cancel(); err != nil {
				logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
			}
		}
	}()

	tsw, err := tx.TarSplitWriter(false)
	if err != nil {
		return nil, err
	}
	defer tsw.Close()
	tdf, err := os.Open(tarDataFile)
	if err != nil {
		return nil, err
	}
	defer tdf.Close()
	_, err = io.Copy(tsw, tdf)
	if err != nil {
		return nil, err
	}

	if err = storeLayer(tx, layer); err != nil {
		return nil, err
	}

	if err = tx.Commit(layer.chainID); err != nil {
		return nil, err
	}

	ls.layerMap[layer.chainID] = layer

	return layer.getReference(), nil
}

type unpackSizeCounter struct {
	unpacker storage.Unpacker
	size     *int64
}

func (u *unpackSizeCounter) Next() (*storage.Entry, error) {
	e, err := u.unpacker.Next()
	if err == nil && u.size != nil {
		*u.size += e.Size
	}
	return e, err
}

type packSizeCounter struct {
	packer storage.Packer
	size   *int64
}

func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) {
	n, err := p.packer.AddEntry(e)
	if err == nil && p.size != nil {
		*p.size += e.Size
	}
	return n, err
}
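// unpackSizeCounter is not referenced in this file; it is the read-side
// counterpart of packSizeCounter and wraps a tar-split storage.Unpacker so
// that entry sizes are tallied while a tar stream is reassembled. The
// function below is a minimal sketch of that usage, assuming only the
// tar-split asm/storage APIs already imported above; the function name and
// its tarSplitPath/root parameters are hypothetical illustrations, not part
// of the layer store.
func exampleAssembleWithSize(tarSplitPath, root string, w io.Writer) (int64, error) {
	var size int64

	// Open the gzip-compressed tar-split metadata, such as the file written
	// by the JSON packer in checksumForGraphIDNoTarsplit.
	f, err := os.Open(tarSplitPath)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	mf, err := gzip.NewReader(f)
	if err != nil {
		return 0, err
	}
	defer mf.Close()

	// Wrap the JSON unpacker so each entry's size is added to size as it is
	// consumed, mirroring how packSizeCounter counts on the write side.
	up := &unpackSizeCounter{storage.NewJSONUnpacker(mf), &size}

	// Reassemble the original tar stream, reading file payloads from root,
	// and write it to w; the size counter is updated as a side effect.
	if err := asm.WriteOutputTarStream(storage.NewPathFileGetter(root), up, w); err != nil {
		return 0, err
	}
	return size, nil
}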