github.com/axw/juju@v0.0.0-20161005053422-4bd6544d08d4/state/backups/create.go

// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package backups

import (
	"compress/gzip"
	"crypto/sha1"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/juju/errors"
	"github.com/juju/loggo"
	"github.com/juju/utils/hash"
	"github.com/juju/utils/tar"
)

// TODO(ericsnow) One concern is files that get out of date by the time
// backup finishes running.  This is particularly a problem with log
// files.

const (
	tempPrefix   = "jujuBackup-"
	tempFilename = "juju-backup.tar.gz"
)

type createArgs struct {
	filesToBackUp  []string
	db             DBDumper
	metadataReader io.Reader
}

type createResult struct {
	archiveFile io.ReadCloser
	size        int64
	checksum    string
}

// create builds a new backup archive file and returns it.  The result
// also carries the file info (size and checksum) needed to update the
// backup metadata.
func create(args *createArgs) (_ *createResult, err error) {
	// Prepare the backup builder.
	builder, err := newBuilder(args.filesToBackUp, args.db)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer func() {
		if cerr := builder.cleanUp(); cerr != nil {
			cerr.Log(logger)
			if err == nil {
				err = cerr
			}
		}
	}()

	// Inject the metadata file.
	if args.metadataReader == nil {
		return nil, errors.New("missing metadataReader")
	}
	if err := builder.injectMetadataFile(args.metadataReader); err != nil {
		return nil, errors.Trace(err)
	}

	// Build the backup.
	if err := builder.buildAll(); err != nil {
		return nil, errors.Trace(err)
	}

	// Get the result.
	result, err := builder.result()
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Return the result.  Note that the entire build workspace will be
	// deleted at the end of this function.  This includes the backup
	// archive file we built.  However, the handle to that file in the
	// result will still be open and readable.
	// If we ever support state machines on Windows, this will need to
	// change (you can't delete open files on Windows).
	return result, nil
}
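
// exampleCreate is an illustrative sketch, not part of the original source.
// It shows how create is wired together: the caller supplies the list of
// files to back up, a DBDumper, and a reader for the metadata document, and
// then owns the returned archive handle.  The file path below is a made-up
// placeholder.
func exampleCreate(dumper DBDumper, metadata io.Reader) (*createResult, error) {
	args := &createArgs{
		filesToBackUp:  []string{"/var/lib/juju/agents"}, // hypothetical path
		db:             dumper,
		metadataReader: metadata,
	}
	result, err := create(args)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// The caller must eventually close result.archiveFile.
	return result, nil
}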

// builder exposes the machinery for creating a backup of juju's state.
type builder struct {
	// rootDir is the root of the archive workspace.
	rootDir string
	// archivePaths holds the paths to the files and directories within
	// the archive workspace.
	archivePaths ArchivePaths
	// filename is the path to the archive file.
	filename string
	// filesToBackUp is the paths to every file to include in the archive.
	filesToBackUp []string
	// db is the wrapper around the DB dump command and args.
	db DBDumper
	// checksum is the checksum of the archive file.
	checksum string
	// archiveFile is the backup archive file.
	archiveFile io.WriteCloser
	// bundleFile is the inner archive file containing all the juju
	// state-related files gathered during backup.
	bundleFile io.WriteCloser
}

// newBuilder returns a new backup archive builder, along with any error.
// It creates the temp directories (temp root, tarball root, DB dump dir)
// which backup uses as its staging area while building the archive, and it
// also creates the archive and bundle files there.
func newBuilder(filesToBackUp []string, db DBDumper) (b *builder, err error) {
	// Create the backups workspace root directory.
	rootDir, err := ioutil.TempDir("", tempPrefix)
	if err != nil {
		return nil, errors.Annotate(err, "while making backups workspace")
	}

	// Populate the builder.
	b = &builder{
		rootDir:       rootDir,
		archivePaths:  NewNonCanonicalArchivePaths(rootDir),
		filename:      filepath.Join(rootDir, tempFilename),
		filesToBackUp: filesToBackUp,
		db:            db,
	}
	defer func() {
		if err != nil {
			if cerr := b.cleanUp(); cerr != nil {
				cerr.Log(logger)
			}
		}
	}()

	// Create all the directories we need.  We go with user-only
	// permissions on principle; the directories are short-lived so in
	// practice it shouldn't matter much.
	err = os.MkdirAll(b.archivePaths.DBDumpDir, 0700)
	if err != nil {
		return nil, errors.Annotate(err, "while creating temp directories")
	}

	// Create the archive files.  We do so here to fail as early as
	// possible.
	b.archiveFile, err = os.Create(b.filename)
	if err != nil {
		return nil, errors.Annotate(err, "while creating archive file")
	}

	b.bundleFile, err = os.Create(b.archivePaths.FilesBundle)
	if err != nil {
		return nil, errors.Annotate(err, "while creating bundle file")
	}

	return b, nil
}
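
// describeWorkspace is an illustrative sketch, not part of the original
// source.  It lists the pieces of the staging area that newBuilder lays out
// under the temp root, limited to the paths this file actually uses.
func describeWorkspace(b *builder) []string {
	return []string{
		b.rootDir,                   // temp workspace root
		b.filename,                  // juju-backup.tar.gz inside the root
		b.archivePaths.ContentDir,   // tarball root folded into the archive
		b.archivePaths.DBDumpDir,    // destination for the database dump
		b.archivePaths.FilesBundle,  // inner tarball of state-related files
		b.archivePaths.MetadataFile, // injected metadata document
	}
}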

func (b *builder) closeArchiveFile() error {
	// Currently this method isn't thread-safe (doesn't need to be).
	if b.archiveFile == nil {
		return nil
	}

	if err := b.archiveFile.Close(); err != nil {
		return errors.Annotate(err, "while closing archive file")
	}

	b.archiveFile = nil
	return nil
}

func (b *builder) closeBundleFile() error {
	// Currently this method isn't thread-safe (doesn't need to be).
	if b.bundleFile == nil {
		return nil
	}

	if err := b.bundleFile.Close(); err != nil {
		return errors.Annotate(err, "while closing bundle file")
	}

	b.bundleFile = nil
	return nil
}

func (b *builder) removeRootDir() error {
	// Currently this method isn't thread-safe (doesn't need to be).
	if b.rootDir == "" {
		panic("rootDir is unexpectedly empty")
	}

	if err := os.RemoveAll(b.rootDir); err != nil {
		return errors.Annotate(err, "while removing backups temp dir")
	}

	return nil
}

type cleanupErrors struct {
	Errors []error
}

func (e cleanupErrors) Error() string {
	if len(e.Errors) == 1 {
		return fmt.Sprintf("while cleaning up: %v", e.Errors[0])
	}
	return fmt.Sprintf("%d errors during cleanup", len(e.Errors))
}

func (e cleanupErrors) Log(logger loggo.Logger) {
	logger.Errorf(e.Error())
	for _, err := range e.Errors {
		logger.Errorf(err.Error())
	}
}

func (b *builder) cleanUp() *cleanupErrors {
	var errors []error

	if err := b.closeBundleFile(); err != nil {
		errors = append(errors, err)
	}
	if err := b.closeArchiveFile(); err != nil {
		errors = append(errors, err)
	}
	if err := b.removeRootDir(); err != nil {
		errors = append(errors, err)
	}

	if errors != nil {
		return &cleanupErrors{errors}
	}
	return nil
}

func (b *builder) injectMetadataFile(source io.Reader) error {
	err := writeAll(b.archivePaths.MetadataFile, source)
	return errors.Trace(err)
}

func writeAll(targetname string, source io.Reader) error {
	target, err := os.Create(targetname)
	if err != nil {
		return errors.Annotatef(err, "while creating file %q", targetname)
	}
	_, err = io.Copy(target, source)
	if err != nil {
		target.Close()
		return errors.Annotatef(err, "while copying into file %q", targetname)
	}
	return errors.Trace(target.Close())
}

func (b *builder) buildFilesBundle() error {
	logger.Infof("dumping juju state-related files")
	if len(b.filesToBackUp) == 0 {
		return errors.New("missing list of files to back up")
	}
	if b.bundleFile == nil {
		return errors.New("missing bundleFile")
	}

	stripPrefix := string(os.PathSeparator)
	_, err := tar.TarFiles(b.filesToBackUp, b.bundleFile, stripPrefix)
	if err != nil {
		return errors.Annotate(err, "while bundling state-critical files")
	}

	return nil
}

func (b *builder) buildDBDump() error {
	logger.Infof("dumping database")
	if b.db == nil {
		logger.Infof("nothing to do")
		return nil
	}

	dumpDir := b.archivePaths.DBDumpDir
	if err := b.db.Dump(dumpDir); err != nil {
		return errors.Annotate(err, "while dumping juju state database")
	}

	return nil
}

func (b *builder) buildArchive(outFile io.Writer) error {
	tarball := gzip.NewWriter(outFile)
	defer tarball.Close()

	// We add a trailing path separator to root so that everything in
	// the path up to and including that separator is stripped off when
	// each file is added to the tar file.
	stripPrefix := b.rootDir + string(os.PathSeparator)
	filenames := []string{b.archivePaths.ContentDir}
	if _, err := tar.TarFiles(filenames, tarball, stripPrefix); err != nil {
		return errors.Annotate(err, "while bundling final archive")
	}

	return nil
}

func (b *builder) buildArchiveAndChecksum() error {
	if b.archiveFile == nil {
		return errors.New("missing archiveFile")
	}
	logger.Infof("building archive file %q", b.filename)

	// Build the tarball, writing out to both the archive file and a
	// SHA1 hash.  The hash will correspond to the gzipped file rather
	// than to the uncompressed contents of the tarball.  This is so
	// that users can compare the published checksum against the
	// checksum of the file without having to decompress it first.
	hasher := hash.NewHashingWriter(b.archiveFile, sha1.New())
	if err := b.buildArchive(hasher); err != nil {
		return errors.Trace(err)
	}

	// Save the SHA1 checksum.
	// Gzip writers may buffer what they're writing, so the gzip writer
	// must be closed before the checksum is read from the hasher.
	// buildArchive closes it (via defer) before returning, so by this
	// point the hasher has seen every byte of the compressed archive.
	b.checksum = hasher.Base64Sum()

	return nil
}
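
// verifyChecksum is an illustrative sketch, not part of the original source.
// It recomputes a checksum the same way buildArchiveAndChecksum produces one:
// the base64-encoded SHA-1 digest of the gzipped archive bytes (not of the
// uncompressed tarball), so the published value can be checked against the
// file as-is.
func verifyChecksum(archivePath, expected string) error {
	archive, err := os.Open(archivePath)
	if err != nil {
		return errors.Trace(err)
	}
	defer archive.Close()

	hasher := hash.NewHashingWriter(ioutil.Discard, sha1.New())
	if _, err := io.Copy(hasher, archive); err != nil {
		return errors.Trace(err)
	}
	if sum := hasher.Base64Sum(); sum != expected {
		return errors.Errorf("checksum mismatch: got %q, want %q", sum, expected)
	}
	return nil
}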

func (b *builder) buildAll() error {
	// Dump the files.
	if err := b.buildFilesBundle(); err != nil {
		return errors.Trace(err)
	}

	// Dump the database.
	if err := b.buildDBDump(); err != nil {
		return errors.Trace(err)
	}

	// Bundle it all into a tarball.
	if err := b.buildArchiveAndChecksum(); err != nil {
		return errors.Trace(err)
	}

	return nil
}

// result returns a "create" result relative to the current state of the
// builder.  create() uses this method to get the final backup result
// from the builder it used.
//
// Note that create() calls builder.cleanUp() after it calls
// builder.result().  cleanUp() causes the builder's workspace directory
// to be deleted.  This means that while the file in the result is still
// open, it no longer corresponds to any filename on the filesystem.
// We do this to avoid leaving any temporary files around.  The
// consequence is that we cannot simply return the temp filename; we
// must leave the file open, and the caller is responsible for closing
// the file (hence io.ReadCloser).
func (b *builder) result() (*createResult, error) {
	// Open the file in read-only mode.
	file, err := os.Open(b.filename)
	if err != nil {
		return nil, errors.Annotate(err, "while opening archive file")
	}

	// Get the size.
	stat, err := file.Stat()
	if err != nil {
		if err := file.Close(); err != nil {
			// We don't want to just throw the error away.
			err = errors.Annotate(err, "while closing file during handling of another error")
			logger.Errorf(err.Error())
		}
		return nil, errors.Annotate(err, "while reading archive file info")
	}
	size := stat.Size()

	// Get the checksum.
	checksum := b.checksum

	// Return the result.
	result := createResult{
		archiveFile: file,
		size:        size,
		checksum:    checksum,
	}
	return &result, nil
}
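
// consumeResult is an illustrative sketch, not part of the original source.
// By the time create returns, the workspace (and hence the archive's
// temporary filename) has been deleted, but the handle in the result is
// still readable.  A caller streams the archive from that handle and is
// responsible for closing it; the target writer here is an assumption.
func consumeResult(result *createResult, target io.Writer) error {
	defer result.archiveFile.Close()

	n, err := io.Copy(target, result.archiveFile)
	if err != nil {
		return errors.Trace(err)
	}
	if n != result.size {
		return errors.Errorf("wrote %d bytes, expected %d", n, result.size)
	}
	return nil
}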