package layer // import "github.com/docker/docker/layer"

import (
	"compress/gzip"
	"encoding/json"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"

	"github.com/docker/distribution"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var (
	// stringIDRegexp matches on-disk mount/init identifiers: 64 lowercase
	// hex characters, optionally suffixed with "-init".
	stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`)
	// supportedAlgorithms lists the digest algorithms whose per-algorithm
	// subdirectories under the store root are scanned for layer metadata.
	supportedAlgorithms = []digest.Algorithm{
		digest.SHA256,
		// digest.SHA384, // Currently not used
		// digest.SHA512, // Currently not used
	}
)

// fileMetadataStore persists layer and mount metadata as plain files on
// disk beneath a single root directory.
type fileMetadataStore struct {
	root string
}

// fileMetadataTransaction stages a new layer's metadata files in an atomic
// write set so they only become visible when Commit moves them into place.
type fileMetadataTransaction struct {
	store *fileMetadataStore
	ws    *ioutils.AtomicWriteSet
}

// newFSMetadataStore returns an instance of a metadata store
// which is backed by files on disk using the provided root
// as the root of metadata files.
41 func newFSMetadataStore(root string) (*fileMetadataStore, error) { 42 if err := os.MkdirAll(root, 0o700); err != nil { 43 return nil, err 44 } 45 return &fileMetadataStore{ 46 root: root, 47 }, nil 48 } 49 50 func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { 51 dgst := digest.Digest(layer) 52 return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Encoded()) 53 } 54 55 func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { 56 return filepath.Join(fms.getLayerDirectory(layer), filename) 57 } 58 59 func (fms *fileMetadataStore) getMountDirectory(mount string) string { 60 return filepath.Join(fms.root, "mounts", mount) 61 } 62 63 func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { 64 return filepath.Join(fms.getMountDirectory(mount), filename) 65 } 66 67 func (fms *fileMetadataStore) StartTransaction() (*fileMetadataTransaction, error) { 68 tmpDir := filepath.Join(fms.root, "tmp") 69 if err := os.MkdirAll(tmpDir, 0o755); err != nil { 70 return nil, err 71 } 72 ws, err := ioutils.NewAtomicWriteSet(tmpDir) 73 if err != nil { 74 return nil, err 75 } 76 77 return &fileMetadataTransaction{ 78 store: fms, 79 ws: ws, 80 }, nil 81 } 82 83 func (fm *fileMetadataTransaction) SetSize(size int64) error { 84 return fm.ws.WriteFile("size", []byte(strconv.FormatInt(size, 10)), 0o644) 85 } 86 87 func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { 88 return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0o644) 89 } 90 91 func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { 92 return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0o644) 93 } 94 95 func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { 96 return fm.ws.WriteFile("cache-id", []byte(cacheID), 0o644) 97 } 98 99 func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error { 100 jsonRef, err := json.Marshal(ref) 101 if err != nil { 102 
return err 103 } 104 return fm.ws.WriteFile("descriptor.json", jsonRef, 0o644) 105 } 106 107 func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { 108 f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0o644) 109 if err != nil { 110 return nil, err 111 } 112 var wc io.WriteCloser 113 if compressInput { 114 wc = gzip.NewWriter(f) 115 } else { 116 wc = f 117 } 118 119 return ioutils.NewWriteCloserWrapper(wc, func() error { 120 wc.Close() 121 return f.Close() 122 }), nil 123 } 124 125 func (fm *fileMetadataTransaction) Commit(layer ChainID) error { 126 finalDir := fm.store.getLayerDirectory(layer) 127 if err := os.MkdirAll(filepath.Dir(finalDir), 0o755); err != nil { 128 return err 129 } 130 131 return fm.ws.Commit(finalDir) 132 } 133 134 func (fm *fileMetadataTransaction) Cancel() error { 135 return fm.ws.Cancel() 136 } 137 138 func (fm *fileMetadataTransaction) String() string { 139 return fm.ws.String() 140 } 141 142 func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { 143 content, err := os.ReadFile(fms.getLayerFilename(layer, "size")) 144 if err != nil { 145 return 0, err 146 } 147 148 size, err := strconv.ParseInt(string(content), 10, 64) 149 if err != nil { 150 return 0, err 151 } 152 153 return size, nil 154 } 155 156 func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { 157 content, err := os.ReadFile(fms.getLayerFilename(layer, "parent")) 158 if err != nil { 159 if os.IsNotExist(err) { 160 return "", nil 161 } 162 return "", err 163 } 164 165 dgst, err := digest.Parse(strings.TrimSpace(string(content))) 166 if err != nil { 167 return "", err 168 } 169 170 return ChainID(dgst), nil 171 } 172 173 func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) { 174 content, err := os.ReadFile(fms.getLayerFilename(layer, "diff")) 175 if err != nil { 176 return "", err 177 } 178 179 dgst, err := digest.Parse(strings.TrimSpace(string(content))) 
180 if err != nil { 181 return "", err 182 } 183 184 return DiffID(dgst), nil 185 } 186 187 func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { 188 contentBytes, err := os.ReadFile(fms.getLayerFilename(layer, "cache-id")) 189 if err != nil { 190 return "", err 191 } 192 content := strings.TrimSpace(string(contentBytes)) 193 194 if content == "" { 195 return "", errors.Errorf("invalid cache id value") 196 } 197 198 return content, nil 199 } 200 201 func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) { 202 content, err := os.ReadFile(fms.getLayerFilename(layer, "descriptor.json")) 203 if err != nil { 204 if os.IsNotExist(err) { 205 // only return empty descriptor to represent what is stored 206 return distribution.Descriptor{}, nil 207 } 208 return distribution.Descriptor{}, err 209 } 210 211 var ref distribution.Descriptor 212 err = json.Unmarshal(content, &ref) 213 if err != nil { 214 return distribution.Descriptor{}, err 215 } 216 return ref, err 217 } 218 219 func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { 220 fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) 221 if err != nil { 222 return nil, err 223 } 224 f, err := gzip.NewReader(fz) 225 if err != nil { 226 fz.Close() 227 return nil, err 228 } 229 230 return ioutils.NewReadCloserWrapper(f, func() error { 231 f.Close() 232 return fz.Close() 233 }), nil 234 } 235 236 func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { 237 if err := os.MkdirAll(fms.getMountDirectory(mount), 0o755); err != nil { 238 return err 239 } 240 return os.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0o644) 241 } 242 243 func (fms *fileMetadataStore) SetInitID(mount string, init string) error { 244 if err := os.MkdirAll(fms.getMountDirectory(mount), 0o755); err != nil { 245 return err 246 } 247 return os.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0o644) 
248 } 249 250 func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { 251 if err := os.MkdirAll(fms.getMountDirectory(mount), 0o755); err != nil { 252 return err 253 } 254 return os.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0o644) 255 } 256 257 func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { 258 contentBytes, err := os.ReadFile(fms.getMountFilename(mount, "mount-id")) 259 if err != nil { 260 return "", err 261 } 262 content := strings.TrimSpace(string(contentBytes)) 263 264 if !stringIDRegexp.MatchString(content) { 265 return "", errors.New("invalid mount id value") 266 } 267 268 return content, nil 269 } 270 271 func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { 272 contentBytes, err := os.ReadFile(fms.getMountFilename(mount, "init-id")) 273 if err != nil { 274 if os.IsNotExist(err) { 275 return "", nil 276 } 277 return "", err 278 } 279 content := strings.TrimSpace(string(contentBytes)) 280 281 if !stringIDRegexp.MatchString(content) { 282 return "", errors.New("invalid init id value") 283 } 284 285 return content, nil 286 } 287 288 func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { 289 content, err := os.ReadFile(fms.getMountFilename(mount, "parent")) 290 if err != nil { 291 if os.IsNotExist(err) { 292 return "", nil 293 } 294 return "", err 295 } 296 297 dgst, err := digest.Parse(strings.TrimSpace(string(content))) 298 if err != nil { 299 return "", err 300 } 301 302 return ChainID(dgst), nil 303 } 304 305 func (fms *fileMetadataStore) getOrphan() ([]roLayer, error) { 306 var orphanLayers []roLayer 307 for _, algorithm := range supportedAlgorithms { 308 fileInfos, err := os.ReadDir(filepath.Join(fms.root, string(algorithm))) 309 if err != nil { 310 if os.IsNotExist(err) { 311 continue 312 } 313 return nil, err 314 } 315 316 for _, fi := range fileInfos { 317 if !fi.IsDir() || !strings.HasSuffix(fi.Name(), 
"-removing") { 318 continue 319 } 320 // At this stage, fi.Name value looks like <digest>-<random>-removing 321 // Split on '-' to get the digest value. 322 nameSplit := strings.Split(fi.Name(), "-") 323 dgst := digest.NewDigestFromEncoded(algorithm, nameSplit[0]) 324 if err := dgst.Validate(); err != nil { 325 logrus.WithError(err).WithField("digest", string(algorithm)+":"+nameSplit[0]).Debug("ignoring invalid digest") 326 continue 327 } 328 329 chainFile := filepath.Join(fms.root, string(algorithm), fi.Name(), "cache-id") 330 contentBytes, err := os.ReadFile(chainFile) 331 if err != nil { 332 if !os.IsNotExist(err) { 333 logrus.WithError(err).WithField("digest", dgst).Error("failed to read cache ID") 334 } 335 continue 336 } 337 cacheID := strings.TrimSpace(string(contentBytes)) 338 if cacheID == "" { 339 logrus.Error("invalid cache ID") 340 continue 341 } 342 343 l := &roLayer{ 344 chainID: ChainID(dgst), 345 cacheID: cacheID, 346 } 347 orphanLayers = append(orphanLayers, *l) 348 } 349 } 350 351 return orphanLayers, nil 352 } 353 354 func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { 355 var ids []ChainID 356 for _, algorithm := range supportedAlgorithms { 357 fileInfos, err := os.ReadDir(filepath.Join(fms.root, string(algorithm))) 358 if err != nil { 359 if os.IsNotExist(err) { 360 continue 361 } 362 return nil, nil, err 363 } 364 365 for _, fi := range fileInfos { 366 if fi.IsDir() && fi.Name() != "mounts" { 367 dgst := digest.NewDigestFromEncoded(algorithm, fi.Name()) 368 if err := dgst.Validate(); err != nil { 369 logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) 370 } else { 371 ids = append(ids, ChainID(dgst)) 372 } 373 } 374 } 375 } 376 377 fileInfos, err := os.ReadDir(filepath.Join(fms.root, "mounts")) 378 if err != nil { 379 if os.IsNotExist(err) { 380 return ids, []string{}, nil 381 } 382 return nil, nil, err 383 } 384 385 var mounts []string 386 for _, fi := range fileInfos { 387 if fi.IsDir() { 388 mounts = 
append(mounts, fi.Name()) 389 } 390 } 391 392 return ids, mounts, nil 393 } 394 395 // Remove layerdb folder if that is marked for removal 396 func (fms *fileMetadataStore) Remove(layer ChainID, cache string) error { 397 dgst := digest.Digest(layer) 398 files, err := os.ReadDir(filepath.Join(fms.root, string(dgst.Algorithm()))) 399 if err != nil { 400 return err 401 } 402 for _, f := range files { 403 if !strings.HasSuffix(f.Name(), "-removing") || !strings.HasPrefix(f.Name(), dgst.Encoded()) { 404 continue 405 } 406 407 // Make sure that we only remove layerdb folder which points to 408 // requested cacheID 409 dir := filepath.Join(fms.root, string(dgst.Algorithm()), f.Name()) 410 chainFile := filepath.Join(dir, "cache-id") 411 contentBytes, err := os.ReadFile(chainFile) 412 if err != nil { 413 logrus.WithError(err).WithField("file", chainFile).Error("cannot get cache ID") 414 continue 415 } 416 cacheID := strings.TrimSpace(string(contentBytes)) 417 if cacheID != cache { 418 continue 419 } 420 logrus.Debugf("Removing folder: %s", dir) 421 err = os.RemoveAll(dir) 422 if err != nil && !os.IsNotExist(err) { 423 logrus.WithError(err).WithField("name", f.Name()).Error("cannot remove layer") 424 continue 425 } 426 } 427 return nil 428 } 429 430 func (fms *fileMetadataStore) RemoveMount(mount string) error { 431 return os.RemoveAll(fms.getMountDirectory(mount)) 432 }