package layer // import "github.com/docker/docker/layer"

import (
	"compress/gzip"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"

	"github.com/docker/distribution"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var (
	// stringIDRegexp validates on-disk mount/init IDs: a 64-char lowercase
	// hex string, optionally suffixed with "-init".
	stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`)
	// supportedAlgorithms lists the digest algorithms whose per-algorithm
	// subdirectories under the store root are scanned by List/getOrphan.
	supportedAlgorithms = []digest.Algorithm{
		digest.SHA256,
		// digest.SHA384, // Currently not used
		// digest.SHA512, // Currently not used
	}
)

// fileMetadataStore persists layer and mount metadata as plain files on
// disk under a single root directory.
type fileMetadataStore struct {
	root string // root directory of all metadata files
}

// fileMetadataTransaction stages layer metadata writes in a temporary
// directory (via AtomicWriteSet) so they can be committed atomically.
type fileMetadataTransaction struct {
	store *fileMetadataStore
	ws    *ioutils.AtomicWriteSet
}

// newFSMetadataStore returns an instance of a metadata store
// which is backed by files on disk using the provided root
// as the root of metadata files.
42 func newFSMetadataStore(root string) (*fileMetadataStore, error) { 43 if err := os.MkdirAll(root, 0700); err != nil { 44 return nil, err 45 } 46 return &fileMetadataStore{ 47 root: root, 48 }, nil 49 } 50 51 func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { 52 dgst := digest.Digest(layer) 53 return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Encoded()) 54 } 55 56 func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { 57 return filepath.Join(fms.getLayerDirectory(layer), filename) 58 } 59 60 func (fms *fileMetadataStore) getMountDirectory(mount string) string { 61 return filepath.Join(fms.root, "mounts", mount) 62 } 63 64 func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { 65 return filepath.Join(fms.getMountDirectory(mount), filename) 66 } 67 68 func (fms *fileMetadataStore) StartTransaction() (*fileMetadataTransaction, error) { 69 tmpDir := filepath.Join(fms.root, "tmp") 70 if err := os.MkdirAll(tmpDir, 0755); err != nil { 71 return nil, err 72 } 73 ws, err := ioutils.NewAtomicWriteSet(tmpDir) 74 if err != nil { 75 return nil, err 76 } 77 78 return &fileMetadataTransaction{ 79 store: fms, 80 ws: ws, 81 }, nil 82 } 83 84 func (fm *fileMetadataTransaction) SetSize(size int64) error { 85 content := fmt.Sprintf("%d", size) 86 return fm.ws.WriteFile("size", []byte(content), 0644) 87 } 88 89 func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { 90 return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644) 91 } 92 93 func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { 94 return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644) 95 } 96 97 func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { 98 return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644) 99 } 100 101 func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error { 102 jsonRef, err := json.Marshal(ref) 103 if 
err != nil { 104 return err 105 } 106 return fm.ws.WriteFile("descriptor.json", jsonRef, 0644) 107 } 108 109 func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { 110 f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) 111 if err != nil { 112 return nil, err 113 } 114 var wc io.WriteCloser 115 if compressInput { 116 wc = gzip.NewWriter(f) 117 } else { 118 wc = f 119 } 120 121 return ioutils.NewWriteCloserWrapper(wc, func() error { 122 wc.Close() 123 return f.Close() 124 }), nil 125 } 126 127 func (fm *fileMetadataTransaction) Commit(layer ChainID) error { 128 finalDir := fm.store.getLayerDirectory(layer) 129 if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { 130 return err 131 } 132 133 return fm.ws.Commit(finalDir) 134 } 135 136 func (fm *fileMetadataTransaction) Cancel() error { 137 return fm.ws.Cancel() 138 } 139 140 func (fm *fileMetadataTransaction) String() string { 141 return fm.ws.String() 142 } 143 144 func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { 145 content, err := os.ReadFile(fms.getLayerFilename(layer, "size")) 146 if err != nil { 147 return 0, err 148 } 149 150 size, err := strconv.ParseInt(string(content), 10, 64) 151 if err != nil { 152 return 0, err 153 } 154 155 return size, nil 156 } 157 158 func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { 159 content, err := os.ReadFile(fms.getLayerFilename(layer, "parent")) 160 if err != nil { 161 if os.IsNotExist(err) { 162 return "", nil 163 } 164 return "", err 165 } 166 167 dgst, err := digest.Parse(strings.TrimSpace(string(content))) 168 if err != nil { 169 return "", err 170 } 171 172 return ChainID(dgst), nil 173 } 174 175 func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) { 176 content, err := os.ReadFile(fms.getLayerFilename(layer, "diff")) 177 if err != nil { 178 return "", err 179 } 180 181 dgst, err := 
digest.Parse(strings.TrimSpace(string(content))) 182 if err != nil { 183 return "", err 184 } 185 186 return DiffID(dgst), nil 187 } 188 189 func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { 190 contentBytes, err := os.ReadFile(fms.getLayerFilename(layer, "cache-id")) 191 if err != nil { 192 return "", err 193 } 194 content := strings.TrimSpace(string(contentBytes)) 195 196 if content == "" { 197 return "", errors.Errorf("invalid cache id value") 198 } 199 200 return content, nil 201 } 202 203 func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) { 204 content, err := os.ReadFile(fms.getLayerFilename(layer, "descriptor.json")) 205 if err != nil { 206 if os.IsNotExist(err) { 207 // only return empty descriptor to represent what is stored 208 return distribution.Descriptor{}, nil 209 } 210 return distribution.Descriptor{}, err 211 } 212 213 var ref distribution.Descriptor 214 err = json.Unmarshal(content, &ref) 215 if err != nil { 216 return distribution.Descriptor{}, err 217 } 218 return ref, err 219 } 220 221 func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { 222 fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) 223 if err != nil { 224 return nil, err 225 } 226 f, err := gzip.NewReader(fz) 227 if err != nil { 228 fz.Close() 229 return nil, err 230 } 231 232 return ioutils.NewReadCloserWrapper(f, func() error { 233 f.Close() 234 return fz.Close() 235 }), nil 236 } 237 238 func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { 239 if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { 240 return err 241 } 242 return os.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) 243 } 244 245 func (fms *fileMetadataStore) SetInitID(mount string, init string) error { 246 if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { 247 return err 248 } 249 return 
os.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) 250 } 251 252 func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { 253 if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { 254 return err 255 } 256 return os.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) 257 } 258 259 func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { 260 contentBytes, err := os.ReadFile(fms.getMountFilename(mount, "mount-id")) 261 if err != nil { 262 return "", err 263 } 264 content := strings.TrimSpace(string(contentBytes)) 265 266 if !stringIDRegexp.MatchString(content) { 267 return "", errors.New("invalid mount id value") 268 } 269 270 return content, nil 271 } 272 273 func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { 274 contentBytes, err := os.ReadFile(fms.getMountFilename(mount, "init-id")) 275 if err != nil { 276 if os.IsNotExist(err) { 277 return "", nil 278 } 279 return "", err 280 } 281 content := strings.TrimSpace(string(contentBytes)) 282 283 if !stringIDRegexp.MatchString(content) { 284 return "", errors.New("invalid init id value") 285 } 286 287 return content, nil 288 } 289 290 func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { 291 content, err := os.ReadFile(fms.getMountFilename(mount, "parent")) 292 if err != nil { 293 if os.IsNotExist(err) { 294 return "", nil 295 } 296 return "", err 297 } 298 299 dgst, err := digest.Parse(strings.TrimSpace(string(content))) 300 if err != nil { 301 return "", err 302 } 303 304 return ChainID(dgst), nil 305 } 306 307 func (fms *fileMetadataStore) getOrphan() ([]roLayer, error) { 308 var orphanLayers []roLayer 309 for _, algorithm := range supportedAlgorithms { 310 fileInfos, err := os.ReadDir(filepath.Join(fms.root, string(algorithm))) 311 if err != nil { 312 if os.IsNotExist(err) { 313 continue 314 } 315 return nil, err 316 } 317 318 for _, fi := range 
fileInfos { 319 if !fi.IsDir() || !strings.HasSuffix(fi.Name(), "-removing") { 320 continue 321 } 322 // At this stage, fi.Name value looks like <digest>-<random>-removing 323 // Split on '-' to get the digest value. 324 nameSplit := strings.Split(fi.Name(), "-") 325 dgst := digest.NewDigestFromEncoded(algorithm, nameSplit[0]) 326 if err := dgst.Validate(); err != nil { 327 logrus.WithError(err).WithField("digest", string(algorithm)+":"+nameSplit[0]).Debug("ignoring invalid digest") 328 continue 329 } 330 331 chainFile := filepath.Join(fms.root, string(algorithm), fi.Name(), "cache-id") 332 contentBytes, err := os.ReadFile(chainFile) 333 if err != nil { 334 if !os.IsNotExist(err) { 335 logrus.WithError(err).WithField("digest", dgst).Error("failed to read cache ID") 336 } 337 continue 338 } 339 cacheID := strings.TrimSpace(string(contentBytes)) 340 if cacheID == "" { 341 logrus.Error("invalid cache ID") 342 continue 343 } 344 345 l := &roLayer{ 346 chainID: ChainID(dgst), 347 cacheID: cacheID, 348 } 349 orphanLayers = append(orphanLayers, *l) 350 } 351 } 352 353 return orphanLayers, nil 354 } 355 356 func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { 357 var ids []ChainID 358 for _, algorithm := range supportedAlgorithms { 359 fileInfos, err := os.ReadDir(filepath.Join(fms.root, string(algorithm))) 360 if err != nil { 361 if os.IsNotExist(err) { 362 continue 363 } 364 return nil, nil, err 365 } 366 367 for _, fi := range fileInfos { 368 if fi.IsDir() && fi.Name() != "mounts" { 369 dgst := digest.NewDigestFromEncoded(algorithm, fi.Name()) 370 if err := dgst.Validate(); err != nil { 371 logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) 372 } else { 373 ids = append(ids, ChainID(dgst)) 374 } 375 } 376 } 377 } 378 379 fileInfos, err := os.ReadDir(filepath.Join(fms.root, "mounts")) 380 if err != nil { 381 if os.IsNotExist(err) { 382 return ids, []string{}, nil 383 } 384 return nil, nil, err 385 } 386 387 var mounts []string 388 for _, 
fi := range fileInfos { 389 if fi.IsDir() { 390 mounts = append(mounts, fi.Name()) 391 } 392 } 393 394 return ids, mounts, nil 395 } 396 397 // Remove layerdb folder if that is marked for removal 398 func (fms *fileMetadataStore) Remove(layer ChainID, cache string) error { 399 dgst := digest.Digest(layer) 400 files, err := os.ReadDir(filepath.Join(fms.root, string(dgst.Algorithm()))) 401 if err != nil { 402 return err 403 } 404 for _, f := range files { 405 if !strings.HasSuffix(f.Name(), "-removing") || !strings.HasPrefix(f.Name(), dgst.Encoded()) { 406 continue 407 } 408 409 // Make sure that we only remove layerdb folder which points to 410 // requested cacheID 411 dir := filepath.Join(fms.root, string(dgst.Algorithm()), f.Name()) 412 chainFile := filepath.Join(dir, "cache-id") 413 contentBytes, err := os.ReadFile(chainFile) 414 if err != nil { 415 logrus.WithError(err).WithField("file", chainFile).Error("cannot get cache ID") 416 continue 417 } 418 cacheID := strings.TrimSpace(string(contentBytes)) 419 if cacheID != cache { 420 continue 421 } 422 logrus.Debugf("Removing folder: %s", dir) 423 err = os.RemoveAll(dir) 424 if err != nil && !os.IsNotExist(err) { 425 logrus.WithError(err).WithField("name", f.Name()).Error("cannot remove layer") 426 continue 427 } 428 } 429 return nil 430 } 431 432 func (fms *fileMetadataStore) RemoveMount(mount string) error { 433 return os.RemoveAll(fms.getMountDirectory(mount)) 434 }