github.com/hugh712/snapd@v0.0.0-20200910133618-1a99902bd583/overlord/snapshotstate/backend/backend.go

// -*- Mode: Go; indent-tabs-mode: t -*-

/*
 * Copyright (C) 2018 Canonical Ltd
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 3 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */

package backend

import (
	"archive/tar"
	"archive/zip"
	"context"
	"crypto"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"sort"
	"syscall"
	"time"

	"github.com/snapcore/snapd/client"
	"github.com/snapcore/snapd/dirs"
	"github.com/snapcore/snapd/logger"
	"github.com/snapcore/snapd/osutil"
	"github.com/snapcore/snapd/snap"
	"github.com/snapcore/snapd/snapdenv"
	"github.com/snapcore/snapd/strutil"
)

const (
	archiveName  = "archive.tgz"
	metadataName = "meta.json"
	metaHashName = "meta.sha3_384"

	userArchivePrefix = "user/"
	userArchiveSuffix = ".tgz"
)

var (
	// Stop is used to ask Iter to stop iteration, without it being an error.
	Stop = errors.New("stop iteration")

	osOpen      = os.Open
	dirNames    = (*os.File).Readdirnames
	backendOpen = Open
	timeNow     = time.Now

	usersForUsernames = usersForUsernamesImpl
)

// Flags encompasses extra flags for snapshots backend Save.
type Flags struct {
	Auto bool
}

// Iter loops over all snapshots in the snapshots directory, applying the given
// function to each. The snapshot will be closed after the function returns. If
// the function returns an error, iteration is stopped (and if the error isn't
// Stop, it's returned as the error of the iterator).
func Iter(ctx context.Context, f func(*Reader) error) error {
	if err := ctx.Err(); err != nil {
		return err
	}

	dir, err := osOpen(dirs.SnapshotsDir)
	if err != nil {
		if osutil.IsDirNotExist(err) {
			// no dir -> no snapshots
			return nil
		}
		return fmt.Errorf("cannot open snapshots directory: %v", err)
	}
	defer dir.Close()

	var names []string
	var readErr error
	for readErr == nil && err == nil {
		names, readErr = dirNames(dir, 100)
		// note os.Readdirnames can return a non-empty names and a non-nil err
		for _, name := range names {
			if err = ctx.Err(); err != nil {
				break
			}

			filename := filepath.Join(dirs.SnapshotsDir, name)
			reader, openError := backendOpen(filename)
			// reader can be non-nil even when openError is not nil (in
			// which case reader.Broken will have a reason). f can
			// check and either ignore or return an error when
			// finding a broken snapshot.
			if reader != nil {
				err = f(reader)
			} else {
				// TODO: use warnings instead
				logger.Noticef("Cannot open snapshot %q: %v.", name, openError)
			}
			if openError == nil {
				// if openError was nil the snapshot was opened and needs closing
				if closeError := reader.Close(); err == nil {
					err = closeError
				}
			}
			if err != nil {
				break
			}
		}
	}

	if readErr != nil && readErr != io.EOF {
		return readErr
	}

	if err == Stop {
		err = nil
	}

	return err
}
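
// An illustrative sketch (not part of the upstream file) of how Iter and Stop
// combine: a caller can end iteration early without reporting an error. The
// helper name firstSnapshotOf is hypothetical.
//
//	func firstSnapshotOf(ctx context.Context, snapName string) (*client.Snapshot, error) {
//		var found *client.Snapshot
//		err := Iter(ctx, func(reader *Reader) error {
//			if reader.Snap != snapName {
//				return nil
//			}
//			shot := reader.Snapshot // copy; the reader is closed once this function returns
//			found = &shot
//			return Stop // stop iterating without turning it into an error
//		})
//		return found, err
//	}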

// List valid snapshot sets.
func List(ctx context.Context, setID uint64, snapNames []string) ([]client.SnapshotSet, error) {
	setshots := map[uint64][]*client.Snapshot{}
	err := Iter(ctx, func(reader *Reader) error {
		if setID == 0 || reader.SetID == setID {
			if len(snapNames) == 0 || strutil.ListContains(snapNames, reader.Snap) {
				setshots[reader.SetID] = append(setshots[reader.SetID], &reader.Snapshot)
			}
		}
		return nil
	})

	sets := make([]client.SnapshotSet, 0, len(setshots))
	for id, shots := range setshots {
		sort.Sort(bySnap(shots))
		sets = append(sets, client.SnapshotSet{ID: id, Snapshots: shots})
	}

	sort.Sort(byID(sets))

	return sets, err
}

// Filename of the given client.Snapshot in this backend.
func Filename(snapshot *client.Snapshot) string {
	// this _needs_ the snap name and version to be valid
	return filepath.Join(dirs.SnapshotsDir, fmt.Sprintf("%d_%s_%s_%s.zip", snapshot.SetID, snapshot.Snap, snapshot.Version, snapshot.Revision))
}

// EstimateSnapshotSize calculates estimated size of the snapshot.
func EstimateSnapshotSize(si *snap.Info, usernames []string) (uint64, error) {
	var total uint64
	calculateSize := func(path string, finfo os.FileInfo, err error) error {
		if finfo.Mode().IsRegular() {
			total += uint64(finfo.Size())
		}
		return err
	}

	visitDir := func(dir string) error {
		exists, isDir, err := osutil.DirExists(dir)
		if err != nil {
			return err
		}
		if !(exists && isDir) {
			return nil
		}
		return filepath.Walk(dir, calculateSize)
	}

	for _, dir := range []string{si.DataDir(), si.CommonDataDir()} {
		if err := visitDir(dir); err != nil {
			return 0, err
		}
	}

	users, err := usersForUsernames(usernames)
	if err != nil {
		return 0, err
	}
	for _, usr := range users {
		if err := visitDir(si.UserDataDir(usr.HomeDir)); err != nil {
			return 0, err
		}
		if err := visitDir(si.UserCommonDataDir(usr.HomeDir)); err != nil {
			return 0, err
		}
	}

	// XXX: we could use a typical compression factor here
	return total, nil
}

// Save a snapshot
func Save(ctx context.Context, id uint64, si *snap.Info, cfg map[string]interface{}, usernames []string, flags *Flags) (*client.Snapshot, error) {
	if err := os.MkdirAll(dirs.SnapshotsDir, 0700); err != nil {
		return nil, err
	}

	var auto bool
	if flags != nil {
		auto = flags.Auto
	}

	snapshot := &client.Snapshot{
		SetID:    id,
		Snap:     si.InstanceName(),
		SnapID:   si.SnapID,
		Revision: si.Revision,
		Version:  si.Version,
		Epoch:    si.Epoch,
		Time:     timeNow(),
		SHA3_384: make(map[string]string),
		Size:     0,
		Conf:     cfg,
		Auto:     auto,
	}

	aw, err := osutil.NewAtomicFile(Filename(snapshot), 0600, 0, osutil.NoChown, osutil.NoChown)
	if err != nil {
		return nil, err
	}
	// if things worked, we'll commit (and Cancel becomes a NOP)
	defer aw.Cancel()

	w := zip.NewWriter(aw)
	defer w.Close() // note this does not close the file descriptor (that's done by hand on the atomic writer, above)
	if err := addDirToZip(ctx, snapshot, w, "root", archiveName, si.DataDir()); err != nil {
		return nil, err
	}

	users, err := usersForUsernames(usernames)
	if err != nil {
		return nil, err
	}

	for _, usr := range users {
		if err := addDirToZip(ctx, snapshot, w, usr.Username, userArchiveName(usr), si.UserDataDir(usr.HomeDir)); err != nil {
			return nil, err
		}
	}

	metaWriter, err := w.Create(metadataName)
	if err != nil {
		return nil, err
	}

	hasher := crypto.SHA3_384.New()
	enc := json.NewEncoder(io.MultiWriter(metaWriter, hasher))
	if err := enc.Encode(snapshot); err != nil {
		return nil, err
	}

	hashWriter, err := w.Create(metaHashName)
	if err != nil {
		return nil, err
	}
	fmt.Fprintf(hashWriter, "%x\n", hasher.Sum(nil))
	if err := w.Close(); err != nil {
		return nil, err
	}

	if err := ctx.Err(); err != nil {
		return nil, err
	}

	if err := aw.Commit(); err != nil {
		return nil, err
	}

	return snapshot, nil
}
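
// Save records the metadata twice: meta.json itself, plus its SHA3-384 digest in
// meta.sha3_384. The following sketch (illustrative only; verifyMetaHash is a
// hypothetical helper, not part of this package) shows how that pairing could be
// checked when reading a snapshot zip back:
//
//	func verifyMetaHash(filename string) error {
//		r, err := zip.OpenReader(filename)
//		if err != nil {
//			return err
//		}
//		defer r.Close()
//		hasher := crypto.SHA3_384.New()
//		var recorded string
//		for _, f := range r.File {
//			if f.Name != metadataName && f.Name != metaHashName {
//				continue
//			}
//			rc, err := f.Open()
//			if err != nil {
//				return err
//			}
//			if f.Name == metadataName {
//				_, err = io.Copy(hasher, rc)
//			} else {
//				_, err = fmt.Fscanf(rc, "%s", &recorded)
//			}
//			rc.Close()
//			if err != nil {
//				return err
//			}
//		}
//		if recorded != fmt.Sprintf("%x", hasher.Sum(nil)) {
//			return fmt.Errorf("snapshot metadata does not match recorded hash")
//		}
//		return nil
//	}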

var isTesting = snapdenv.Testing()

func addDirToZip(ctx context.Context, snapshot *client.Snapshot, w *zip.Writer, username string, entry, dir string) error {
	parent, revdir := filepath.Split(dir)
	exists, isDir, err := osutil.DirExists(parent)
	if err != nil {
		return err
	}
	if exists && !isDir {
		logger.Noticef("Not saving directories under %q in snapshot #%d of %q as it is not a directory.", parent, snapshot.SetID, snapshot.Snap)
		return nil
	}
	if !exists {
		logger.Debugf("Not saving directories under %q in snapshot #%d of %q as it does not exist.", parent, snapshot.SetID, snapshot.Snap)
		return nil
	}
	tarArgs := []string{
		"--create",
		"--sparse", "--gzip",
		"--directory", parent,
	}

	noRev, noCommon := true, true

	exists, isDir, err = osutil.DirExists(dir)
	if err != nil {
		return err
	}
	switch {
	case exists && isDir:
		tarArgs = append(tarArgs, revdir)
		noRev = false
	case exists && !isDir:
		logger.Noticef("Not saving %q in snapshot #%d of %q as it is not a directory.", dir, snapshot.SetID, snapshot.Snap)
	case !exists:
		logger.Debugf("Not saving %q in snapshot #%d of %q as it does not exist.", dir, snapshot.SetID, snapshot.Snap)
	}

	common := filepath.Join(parent, "common")
	exists, isDir, err = osutil.DirExists(common)
	if err != nil {
		return err
	}
	switch {
	case exists && isDir:
		tarArgs = append(tarArgs, "common")
		noCommon = false
	case exists && !isDir:
		logger.Noticef("Not saving %q in snapshot #%d of %q as it is not a directory.", common, snapshot.SetID, snapshot.Snap)
	case !exists:
		logger.Debugf("Not saving %q in snapshot #%d of %q as it does not exist.", common, snapshot.SetID, snapshot.Snap)
	}

	if noCommon && noRev {
		return nil
	}

	archiveWriter, err := w.CreateHeader(&zip.FileHeader{Name: entry})
	if err != nil {
		return err
	}

	var sz osutil.Sizer
	hasher := crypto.SHA3_384.New()

	cmd := tarAsUser(username, tarArgs...)
	cmd.Stdout = io.MultiWriter(archiveWriter, hasher, &sz)
	matchCounter := &strutil.MatchCounter{N: 1}
	cmd.Stderr = matchCounter
	if isTesting {
		matchCounter.N = -1
		cmd.Stderr = io.MultiWriter(os.Stderr, matchCounter)
	}
	if err := osutil.RunWithContext(ctx, cmd); err != nil {
		matches, count := matchCounter.Matches()
		if count > 0 {
			return fmt.Errorf("cannot create archive: %s (and %d more)", matches[0], count-1)
		}
		return fmt.Errorf("tar failed: %v", err)
	}

	snapshot.SHA3_384[entry] = fmt.Sprintf("%x", hasher.Sum(nil))
	snapshot.Size += sz.Size()

	return nil
}
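
// To make the shell-out above concrete: for a per-revision data directory such as
// /var/snap/foo/7 (an illustrative path, not taken from this file), addDirToZip
// ends up running roughly
//
//	tar --create --sparse --gzip --directory /var/snap/foo/ 7 common
//
// as the target user (via tarAsUser), streaming the tarball straight into the zip
// entry while hashing and sizing it on the fly through the io.MultiWriter.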

type exportMetadata struct {
	Format int       `json:"format"`
	Date   time.Time `json:"date"`
	Files  []string  `json:"files"`
}

type SnapshotExport struct {
	// open snapshot files
	snapshotFiles []*os.File

	// remember setID mostly for nicer errors
	setID uint64

	// cached size, calculated by Init
	size int64
}

// NewSnapshotExport will return a SnapshotExport structure. It must be
// Close()ed after use to avoid leaking file descriptors.
func NewSnapshotExport(ctx context.Context, setID uint64) (se *SnapshotExport, err error) {
	var snapshotFiles []*os.File

	defer func() {
		// cleanup any open FDs if anything goes wrong
		if err != nil {
			for _, f := range snapshotFiles {
				f.Close()
			}
		}
	}()

	// Open all files first and keep the file descriptors
	// open. The caller should have locked the state so that no
	// delete/change snapshot operations can happen while the
	// files are getting opened.
	err = Iter(ctx, func(reader *Reader) error {
		if reader.SetID == setID {
			// Duplicate the file descriptor of the reader we were handed as
			// Iter() closes those as soon as this anonymous function returns. We
			// re-package the file descriptor into snapshotFiles below.
			fd, err := syscall.Dup(int(reader.Fd()))
			if err != nil {
				return fmt.Errorf("cannot duplicate descriptor: %v", err)
			}
			f := os.NewFile(uintptr(fd), reader.Name())
			if f == nil {
				return fmt.Errorf("cannot open file from descriptor %d", fd)
			}
			snapshotFiles = append(snapshotFiles, f)
		}
		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("cannot export snapshot %v: %v", setID, err)
	}
	if len(snapshotFiles) == 0 {
		return nil, fmt.Errorf("no snapshot data found for %v", setID)
	}

	se = &SnapshotExport{snapshotFiles: snapshotFiles, setID: setID}

	// ensure we never leak FDs even if the user does not call close
	runtime.SetFinalizer(se, (*SnapshotExport).Close)

	return se, nil
}
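
// A minimal sketch of the intended export flow (illustrative only; it assumes an
// http.ResponseWriter named w, and the state locking and daemon plumbing live
// outside this package):
//
//	se, err := NewSnapshotExport(ctx, setID) // state lock held while the files are opened
//	if err != nil {
//		return err
//	}
//	defer se.Close()
//	if err := se.Init(); err != nil { // without locks: streams once to learn the size
//		return err
//	}
//	w.Header().Set("Content-Length", fmt.Sprintf("%d", se.Size()))
//	return se.StreamTo(w) // second pass streams the actual tar to the client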

// Init will calculate the snapshot size. This can take some time
// so it should be called without any locks. The SnapshotExport
// keeps the FDs open so even files moved/deleted will be found.
func (se *SnapshotExport) Init() error {
	// Export once into a dummy writer so that we can set the size
	// of the export. This is then used to set the Content-Length
	// in the response correctly.
	//
	// Note that the size of the generated tar could change if the
	// time switches between this export and the export we stream
	// to the client to a time after the year 2242. This is unlikely
	// but a known issue with this approach here.
	var sz osutil.Sizer
	if err := se.StreamTo(&sz); err != nil {
		return fmt.Errorf("cannot calculate the size for %v: %s", se.setID, err)
	}
	se.size = sz.Size()
	return nil
}

func (se *SnapshotExport) Size() int64 {
	return se.size
}

func (se *SnapshotExport) Close() {
	for _, f := range se.snapshotFiles {
		f.Close()
	}
	se.snapshotFiles = nil
}

func (se *SnapshotExport) StreamTo(w io.Writer) error {
	// write out a tar
	var files []string
	tw := tar.NewWriter(w)
	defer tw.Close()
	for _, snapshotFile := range se.snapshotFiles {
		stat, err := snapshotFile.Stat()
		if err != nil {
			return err
		}
		if !stat.Mode().IsRegular() {
			// should never happen
			return fmt.Errorf("unexpected special file %q in snapshot: %s", stat.Name(), stat.Mode())
		}
		if _, err := snapshotFile.Seek(0, 0); err != nil {
			return fmt.Errorf("cannot seek on %v: %v", stat.Name(), err)
		}
		hdr, err := tar.FileInfoHeader(stat, "")
		if err != nil {
			return fmt.Errorf("cannot create tar header for %v: %v", stat.Name(), err)
		}
		if err = tw.WriteHeader(hdr); err != nil {
			return fmt.Errorf("cannot write header for %v: %v", stat.Name(), err)
		}
		if _, err := io.Copy(tw, snapshotFile); err != nil {
			return fmt.Errorf("cannot write data for %v: %v", stat.Name(), err)
		}

		files = append(files, path.Base(snapshotFile.Name()))
	}

	// write the metadata last, then the client can use that to
	// validate the archive is complete
	meta := exportMetadata{
		Format: 1,
		Date:   timeNow(),
		Files:  files,
	}
	metaDataBuf, err := json.Marshal(&meta)
	if err != nil {
		return fmt.Errorf("cannot marshal meta-data: %v", err)
	}
	hdr := &tar.Header{
		Typeflag: tar.TypeReg,
		Name:     "export.json",
		Size:     int64(len(metaDataBuf)),
		Mode:     0640,
		ModTime:  timeNow(),
	}
	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}
	if _, err := tw.Write(metaDataBuf); err != nil {
		return err
	}

	return nil
}
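
// A consumer-side sketch (illustrative, not part of the upstream file) of how a
// stream produced by StreamTo can be validated: export.json is written last, so a
// reader can walk the archive, remember the member names it saw, and check them
// against the metadata. The helper name checkExport is hypothetical.
//
//	func checkExport(r io.Reader) error {
//		seen := map[string]bool{}
//		var meta *exportMetadata
//		tr := tar.NewReader(r)
//		for {
//			hdr, err := tr.Next()
//			if err == io.EOF {
//				break
//			}
//			if err != nil {
//				return err
//			}
//			if hdr.Name == "export.json" {
//				meta = &exportMetadata{}
//				if err := json.NewDecoder(tr).Decode(meta); err != nil {
//					return err
//				}
//				continue
//			}
//			seen[hdr.Name] = true
//		}
//		if meta == nil {
//			return fmt.Errorf("no export.json in export")
//		}
//		for _, name := range meta.Files {
//			if !seen[name] {
//				return fmt.Errorf("incomplete export, missing %q", name)
//			}
//		}
//		return nil
//	}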