github.com/niedbalski/juju@v0.0.0-20190215020005-8ff100488e47/state/backups/create.go

// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package backups

import (
	"compress/gzip"
	"crypto/sha1"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/juju/errors"
	"github.com/juju/loggo"
	"github.com/juju/utils/hash"
	"github.com/juju/utils/tar"
)

// TODO(ericsnow) One concern is files that get out of date by the time
// backup finishes running. This is particularly a problem with log
// files.

const (
	tempPrefix   = "jujuBackup-"
	TempFilename = "juju-backup.tar.gz"
)

type createArgs struct {
	backupDir      string
	filesToBackUp  []string
	db             DBDumper
	metadataReader io.Reader
	noDownload     bool
}

type createResult struct {
	archiveFile io.ReadCloser
	size        int64
	checksum    string
	filename    string
}

// create builds a new backup archive file and returns it. It also
// updates the metadata with the file info.
func create(args *createArgs) (_ *createResult, err error) {
	// Prepare the backup builder.
	builder, err := newBuilder(args.backupDir, args.filesToBackUp, args.db)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer func() {
		if cerr := builder.cleanUp(args.noDownload); cerr != nil {
			cerr.Log(logger)
			if err == nil {
				err = cerr
			}
		}
	}()
	// Inject the metadata file.
	if args.metadataReader == nil {
		return nil, errors.New("missing metadataReader")
	}
	if err := builder.injectMetadataFile(args.metadataReader); err != nil {
		return nil, errors.Trace(err)
	}

	// Build the backup.
	if err := builder.buildAll(); err != nil {
		return nil, errors.Trace(err)
	}

	// Get the result.
	result, err := builder.result()
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Return the result. Note that the entire build workspace will be
	// deleted at the end of this function. This includes the backup
	// archive file we built. However, the handle to that file in the
	// result will still be open and readable.
	// If we ever support state machines on Windows, this will need to
	// change (you can't delete open files on Windows).
	return result, nil
}
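
// What follows is an illustrative sketch, not part of the original
// file: it shows how a caller of create() might consume the result,
// given the note above that the workspace (and with it the archive's
// filename) is gone by the time create() returns, so the caller must
// read from the still-open handle and close it itself. The names
// "args" and "dest" are hypothetical.
//
//	result, err := create(args)
//	if err != nil {
//		return errors.Trace(err)
//	}
//	defer result.archiveFile.Close() // the caller owns the open handle
//	if _, err := io.Copy(dest, result.archiveFile); err != nil {
//		return errors.Trace(err)
//	}
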
// builder exposes the machinery for creating a backup of juju's state.
type builder struct {
	// rootDir is the root of the archive workspace.
	rootDir string
	// archivePaths holds the paths used within the archive workspace.
	archivePaths ArchivePaths
	// filename is the path to the archive file.
	filename string
	// filesToBackUp lists the paths of every file to include in the archive.
	filesToBackUp []string
	// db is the wrapper around the DB dump command and args.
	db DBDumper
	// checksum is the checksum of the archive file.
	checksum string
	// archiveFile is the backup archive file.
	archiveFile io.WriteCloser
	// bundleFile is the inner archive file containing all the juju
	// state-related files gathered during backup.
	bundleFile io.WriteCloser
}

// newBuilder returns a new backup archive builder. It creates the temp
// directories which backup uses as its staging area while building the
// archive (the workspace root, the tarball content root, and the DB
// dump dir), and it creates the empty archive and bundle files. The
// builder is returned along with any error that occurred.
func newBuilder(backupDir string, filesToBackUp []string, db DBDumper) (b *builder, err error) {
	// Create the backups workspace root directory.
	rootDir, err := ioutil.TempDir(backupDir, tempPrefix)
	if err != nil {
		return nil, errors.Annotate(err, "while making backups workspace")
	}

	// Populate the builder.
	b = &builder{
		rootDir:       rootDir,
		archivePaths:  NewNonCanonicalArchivePaths(rootDir),
		filename:      filepath.Join(rootDir, TempFilename),
		filesToBackUp: filesToBackUp,
		db:            db,
	}
	defer func() {
		if err != nil {
			if cerr := b.cleanUp(true); cerr != nil {
				cerr.Log(logger)
			}
		}
	}()

	// Create all the directories we need. We go with user-only
	// permissions on principle; the directories are short-lived so in
	// practice it shouldn't matter much.
	err = os.MkdirAll(b.archivePaths.DBDumpDir, 0700)
	if err != nil {
		return nil, errors.Annotate(err, "while creating temp directories")
	}

	// Create the archive files. We do so here to fail as early as
	// possible.
	b.archiveFile, err = os.Create(b.filename)
	if err != nil {
		return nil, errors.Annotate(err, "while creating archive file")
	}

	b.bundleFile, err = os.Create(b.archivePaths.FilesBundle)
	if err != nil {
		return nil, errors.Annotate(err, `while creating bundle file`)
	}

	return b, nil
}
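
// The layout below is an illustrative sketch of the workspace that
// newBuilder stages; it is inferred from how the paths are used in
// this file rather than taken from it, and the concrete subpaths
// (provided by NewNonCanonicalArchivePaths) may differ:
//
//	<backupDir>/jujuBackup-XXXXX/   b.rootDir (ioutil.TempDir)
//	    juju-backup.tar.gz          b.filename, backed by b.archiveFile
//	    <ContentDir>/               the only tree tarred into the final archive
//	        <FilesBundle>           b.bundleFile, a tar of filesToBackUp
//	        <DBDumpDir>/            output directory for DBDumper.Dump
//	        <MetadataFile>          written by injectMetadataFile
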
func (b *builder) closeArchiveFile() error {
	// Currently this method isn't thread-safe (doesn't need to be).
	if b.archiveFile == nil {
		return nil
	}

	if err := b.archiveFile.Close(); err != nil {
		return errors.Annotate(err, "while closing archive file")
	}

	b.archiveFile = nil
	return nil
}

func (b *builder) closeBundleFile() error {
	// Currently this method isn't thread-safe (doesn't need to be).
	if b.bundleFile == nil {
		return nil
	}

	if err := b.bundleFile.Close(); err != nil {
		return errors.Annotate(err, "while closing bundle file")
	}

	b.bundleFile = nil
	return nil
}

func (b *builder) removeRootDir() error {
	// Currently this method isn't thread-safe (doesn't need to be).
	if b.rootDir == "" {
		panic(fmt.Sprintf("rootDir is unexpectedly empty, filename(%s)", b.filename))
	}

	if err := os.RemoveAll(b.rootDir); err != nil {
		return errors.Annotate(err, "while removing backups temp dir")
	}

	return nil
}

type cleanupErrors struct {
	Errors []error
}

func (e cleanupErrors) Error() string {
	if len(e.Errors) == 1 {
		return fmt.Sprintf("while cleaning up: %v", e.Errors[0])
	} else {
		return fmt.Sprintf("%d errors during cleanup", len(e.Errors))
	}
}

func (e cleanupErrors) Log(logger loggo.Logger) {
	logger.Errorf(e.Error())
	for _, err := range e.Errors {
		logger.Errorf(err.Error())
	}
}

func (b *builder) cleanUp(removeDir bool) *cleanupErrors {
	var errors []error

	if err := b.closeBundleFile(); err != nil {
		errors = append(errors, err)
	}
	if err := b.closeArchiveFile(); err != nil {
		errors = append(errors, err)
	}
	if removeDir {
		if err := b.removeRootDir(); err != nil {
			errors = append(errors, err)
		}
	}

	if errors != nil {
		return &cleanupErrors{errors}
	}
	return nil
}

func (b *builder) injectMetadataFile(source io.Reader) error {
	err := writeAll(b.archivePaths.MetadataFile, source)
	return errors.Trace(err)
}

func writeAll(targetname string, source io.Reader) error {
	target, err := os.Create(targetname)
	if err != nil {
		return errors.Annotatef(err, "while creating file %q", targetname)
	}
	_, err = io.Copy(target, source)
	if err != nil {
		target.Close()
		return errors.Annotatef(err, "while copying into file %q", targetname)
	}
	return errors.Trace(target.Close())
}

func (b *builder) buildFilesBundle() error {
	logger.Infof("dumping juju state-related files")
	if len(b.filesToBackUp) == 0 {
		return errors.New("missing list of files to back up")
	}
	if b.bundleFile == nil {
		return errors.New("missing bundleFile")
	}

	stripPrefix := string(os.PathSeparator)
	_, err := tar.TarFiles(b.filesToBackUp, b.bundleFile, stripPrefix)
	if err != nil {
		return errors.Annotate(err, "while bundling state-critical files")
	}

	return nil
}

func (b *builder) buildDBDump() error {
	logger.Infof("dumping database")
	if b.db == nil {
		logger.Infof("nothing to do")
		return nil
	}

	dumpDir := b.archivePaths.DBDumpDir
	if err := b.db.Dump(dumpDir); err != nil {
		return errors.Annotate(err, "while dumping juju state database")
	}

	return nil
}

func (b *builder) buildArchive(outFile io.Writer) error {
	tarball := gzip.NewWriter(outFile)
	defer tarball.Close()

	// We add a trailing path separator to root so that everything in
	// the path up to and including that separator is stripped off when
	// each file is added to the tar file.
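	// For example (paths here are hypothetical): with a rootDir of
	// "/tmp/jujuBackup-123", the strip prefix is "/tmp/jujuBackup-123/",
	// so a staged file "/tmp/jujuBackup-123/juju-backup/metadata.json"
	// is stored in the tarball as "juju-backup/metadata.json".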
	stripPrefix := b.rootDir + string(os.PathSeparator)
	filenames := []string{b.archivePaths.ContentDir}
	if _, err := tar.TarFiles(filenames, tarball, stripPrefix); err != nil {
		return errors.Annotate(err, "while bundling final archive")
	}

	return nil
}

func (b *builder) buildArchiveAndChecksum() error {
	if b.archiveFile == nil {
		return errors.New("missing archiveFile")
	}
	logger.Infof("building archive file %q", b.filename)

	// Build the tarball, writing out to both the archive file and a
	// SHA1 hash. The hash will correspond to the gzipped file rather
	// than to the uncompressed contents of the tarball. This is so
	// that users can compare the published checksum against the
	// checksum of the file without having to decompress it first.
	hasher := hash.NewHashingWriter(b.archiveFile, sha1.New())
	if err := b.buildArchive(hasher); err != nil {
		return errors.Trace(err)
	}

	// Save the SHA1 checksum.
	// Gzip writers may buffer what they're writing so we must call
	// Close() on the writer *before* getting the checksum from the
	// hasher.
	b.checksum = hasher.Base64Sum()

	return nil
}

func (b *builder) buildAll() error {
	// Dump the files.
	if err := b.buildFilesBundle(); err != nil {
		return errors.Trace(err)
	}

	// Dump the database.
	if err := b.buildDBDump(); err != nil {
		return errors.Trace(err)
	}

	// Bundle it all into a tarball.
	if err := b.buildArchiveAndChecksum(); err != nil {
		return errors.Trace(err)
	}

	return nil
}

// result returns a "create" result relative to the current state of the
// builder. create() uses this method to get the final backup result
// from the builder it used.
//
// Note that create() calls builder.cleanUp() after it calls
// builder.result(). cleanUp() causes the builder's workspace directory
// to be deleted. This means that while the file in the result is still
// open, it no longer corresponds to any filename on the filesystem.
// We do this to avoid leaving any temporary files around. The
// consequence is that we cannot simply return the temp filename; we
// must leave the file open, and the caller is responsible for closing
// the file (hence io.ReadCloser).
func (b *builder) result() (*createResult, error) {
	// Open the file in read-only mode.
	file, err := os.Open(b.filename)
	if err != nil {
		return nil, errors.Annotate(err, "while opening archive file")
	}

	// Get the size.
	stat, err := file.Stat()
	if err != nil {
		if err := file.Close(); err != nil {
			// We don't want to just throw the error away.
			err = errors.Annotate(err, "while closing file during handling of another error")
			logger.Errorf(err.Error())
		}
		return nil, errors.Annotate(err, "while reading archive file info")
	}
	size := stat.Size()

	// Get the checksum.
	checksum := b.checksum

	// Return the result.
	result := createResult{
		archiveFile: file,
		size:        size,
		checksum:    checksum,
		filename:    b.filename,
	}
	return &result, nil
}
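
// The function below is an illustrative sketch, not part of the
// original file: it shows how the stored checksum could be verified
// against a downloaded archive, assuming hasher.Base64Sum() above
// yields the standard base64 encoding of the raw SHA-1 digest of the
// gzipped archive bytes. verifyArchiveChecksum and its parameters are
// hypothetical, and a real implementation would also need
// "encoding/base64" imported.
//
//	func verifyArchiveChecksum(archivePath, wantChecksum string) error {
//		f, err := os.Open(archivePath)
//		if err != nil {
//			return errors.Trace(err)
//		}
//		defer f.Close()
//		h := sha1.New()
//		if _, err := io.Copy(h, f); err != nil {
//			return errors.Trace(err)
//		}
//		got := base64.StdEncoding.EncodeToString(h.Sum(nil))
//		if got != wantChecksum {
//			return errors.Errorf("checksum mismatch: got %q, want %q", got, wantChecksum)
//		}
//		return nil
//	}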