vitess.io/vitess@v0.16.2/go/vt/mysqlctl/xtrabackupengine.go

/*
Copyright 2019 The Vitess Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package mysqlctl

import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"os/exec"
	"path"
	"regexp"
	"strings"
	"sync"
	"time"

	"github.com/spf13/pflag"

	"vitess.io/vitess/go/mysql"
	"vitess.io/vitess/go/vt/logutil"
	"vitess.io/vitess/go/vt/mysqlctl/backupstorage"
	"vitess.io/vitess/go/vt/proto/vtrpc"
	"vitess.io/vitess/go/vt/servenv"
	"vitess.io/vitess/go/vt/vterrors"
)

// XtrabackupEngine encapsulates the logic of the xtrabackup engine.
// It implements the BackupEngine interface and contains all the logic
// required to implement a backup/restore by invoking xtrabackup with
// the appropriate parameters.
type XtrabackupEngine struct {
}

var (
	// path where the backup engine program is located
	xtrabackupEnginePath string
	// flags to pass through to the backup phase
	xtrabackupBackupFlags string
	// flags to pass through to the prepare phase of restore
	xtrabackupPrepareFlags string
	// flags to pass through to the extract phase of restore
	xbstreamRestoreFlags string
	// streaming mode
	xtrabackupStreamMode = "tar"
	xtrabackupUser       string
	// striping mode
	xtrabackupStripes         uint
	xtrabackupStripeBlockSize = uint(102400)
)

const (
	streamModeTar        = "tar"
	xtrabackupBinaryName = "xtrabackup"
	xtrabackupEngineName = "xtrabackup"
	xbstream             = "xbstream"

	// closeTimeout is the timeout for closing backup files after writing.
	closeTimeout = 10 * time.Minute
)

// xtraBackupManifest represents a backup.
// It stores the name of the backup file, the replication position,
// whether the backup is compressed using gzip, and any extra
// command line parameters used while invoking it.
type xtraBackupManifest struct {
	// BackupManifest is an anonymous embedding of the base manifest struct.
	BackupManifest
	// CompressionEngine stores which compression engine was originally provided
	// to compress the files. Note that if the user provided externalCompressorCmd,
	// this field will contain the value 'external'. It is used during the restore
	// routine as a hint about what kind of compression was used.
	CompressionEngine string `json:",omitempty"`
	// FileName is the name of the backup file.
	FileName string
	// Params are the parameters that the backup was run with.
	Params string `json:"ExtraCommandLineParams"`
	// StreamMode is the stream mode used to create this backup.
	StreamMode string
	// NumStripes is the number of stripes the file is split across, if any.
	NumStripes int32
	// StripeBlockSize is the size in bytes of each stripe block.
	StripeBlockSize int32

	// SkipCompress is true if the backup files were NOT run through gzip.
	// The field is expressed as a negative because it will come through as
	// false for backups that were created before the field existed, and those
	// backups all had compression enabled.
	SkipCompress bool
}

func init() {
	for _, cmd := range []string{"vtcombo", "vttablet", "vtbackup", "vttestserver", "vtctldclient"} {
		servenv.OnParseFor(cmd, registerXtraBackupEngineFlags)
	}
}

func registerXtraBackupEngineFlags(fs *pflag.FlagSet) {
	fs.StringVar(&xtrabackupEnginePath, "xtrabackup_root_path", xtrabackupEnginePath, "Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin")
	fs.StringVar(&xtrabackupBackupFlags, "xtrabackup_backup_flags", xtrabackupBackupFlags, "Flags to pass to backup command. These should be space separated and will be added to the end of the command")
	fs.StringVar(&xtrabackupPrepareFlags, "xtrabackup_prepare_flags", xtrabackupPrepareFlags, "Flags to pass to prepare command. These should be space separated and will be added to the end of the command")
	fs.StringVar(&xbstreamRestoreFlags, "xbstream_restore_flags", xbstreamRestoreFlags, "Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt")
	fs.StringVar(&xtrabackupStreamMode, "xtrabackup_stream_mode", xtrabackupStreamMode, "Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0")
	fs.StringVar(&xtrabackupUser, "xtrabackup_user", xtrabackupUser, "User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.")
	fs.UintVar(&xtrabackupStripes, "xtrabackup_stripes", xtrabackupStripes, "If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression")
	fs.UintVar(&xtrabackupStripeBlockSize, "xtrabackup_stripe_block_size", xtrabackupStripeBlockSize, "Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe")
}

// backupFileName returns the base name of the backup archive, including the
// extensions implied by the stream mode and compression settings,
// e.g. "backup.xbstream.gz".
func (be *XtrabackupEngine) backupFileName() string {
	fileName := "backup"
	if xtrabackupStreamMode != "" {
		fileName += "."
		fileName += xtrabackupStreamMode
	}
	if backupStorageCompress {
		if ExternalDecompressorCmd != "" {
			fileName += ExternalCompressorExt
		} else {
			if ext, err := getExtensionFromEngine(CompressionEngineName); err != nil {
				// there is a check for this, but just in case that fails, we set an extension on the file
				fileName += ".unknown"
			} else {
				fileName += ext
			}
		}
	}
	return fileName
}

func closeFile(wc io.WriteCloser, fileName string, logger logutil.Logger, finalErr *error) {
	logger.Infof("Closing backup file %v", fileName)
	if closeErr := wc.Close(); *finalErr == nil {
		*finalErr = closeErr
	} else if closeErr != nil {
		// since we already have an error, just log this one
		logger.Errorf("error closing file %v: %v", fileName, closeErr)
	}
}

// ExecuteBackup returns a boolean that indicates if the backup is usable,
// and an overall error.
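// At a high level it: (1) connects to MySQL to detect the flavor and server
// UUID, (2) streams the output of xtrabackup into one or more (optionally
// compressed) files via backupFiles, and (3) writes the MANIFEST, which is
// only created once all data files have been closed successfully.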
func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (complete bool, finalErr error) {

	if params.IncrementalFromPos != "" {
		return false, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "incremental backups not supported in xtrabackup engine.")
	}
	if xtrabackupUser == "" {
		return false, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "xtrabackupUser must be specified.")
	}

	// an extension is required when using an external compressor
	if backupStorageCompress && ExternalCompressorCmd != "" && ExternalCompressorExt == "" {
		return false, vterrors.New(vtrpc.Code_INVALID_ARGUMENT,
			"flag --external-compressor-extension not provided when using an external compressor")
	}

	// use a mysql connection to detect flavor at runtime
	conn, err := params.Mysqld.GetDbaConnection(ctx)
	if conn != nil && err == nil {
		defer conn.Close()
	}

	if err != nil {
		return false, vterrors.Wrap(err, "unable to obtain a connection to the database")
	}
	pos, err := conn.PrimaryPosition()
	if err != nil {
		return false, vterrors.Wrap(err, "unable to obtain primary position")
	}
	serverUUID, err := conn.GetServerUUID()
	if err != nil {
		return false, vterrors.Wrap(err, "can't get server uuid")
	}

	flavor := pos.GTIDSet.Flavor()
	params.Logger.Infof("Detected MySQL flavor: %v", flavor)

	backupFileName := be.backupFileName()
	params.Logger.Infof("backup file name: %s", backupFileName)
	numStripes := int(xtrabackupStripes)

	// Perform the backup in a separate function, so deferred calls to Close()
	// are all done before we continue to write the MANIFEST. This ensures that
	// we do not write the MANIFEST unless all files were closed successfully,
	// maintaining the contract that a MANIFEST file should only exist if the
	// backup was created successfully.
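	// For reference, the MANIFEST written at the end of this function is JSON
	// along these lines (illustrative values only; the field set follows
	// xtraBackupManifest and the embedded BackupManifest above):
	//
	//   {
	//     "BackupMethod": "xtrabackup",
	//     "Position": "MySQL56/00000000-0000-0000-0000-000000000001:1-100",
	//     "FileName": "backup.xbstream.gz",
	//     "StreamMode": "xbstream",
	//     "NumStripes": 4,
	//     "SkipCompress": false
	//   }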
	params.Logger.Infof("Starting backup with %v stripe(s)", numStripes)
	replicationPosition, err := be.backupFiles(ctx, params, bh, backupFileName, numStripes, flavor)
	if err != nil {
		return false, err
	}

	// open the MANIFEST
	params.Logger.Infof("Writing backup MANIFEST")
	mwc, err := bh.AddFile(ctx, backupManifestFileName, backupstorage.FileSizeUnknown)
	if err != nil {
		return false, vterrors.Wrapf(err, "cannot add %v to backup", backupManifestFileName)
	}
	defer closeFile(mwc, backupManifestFileName, params.Logger, &finalErr)

	// JSON-encode and write the MANIFEST
	bm := &xtraBackupManifest{
		// Common base fields
		BackupManifest: BackupManifest{
			BackupMethod: xtrabackupEngineName,
			Position:     replicationPosition,
			ServerUUID:   serverUUID,
			TabletAlias:  params.TabletAlias,
			Keyspace:     params.Keyspace,
			Shard:        params.Shard,
			BackupTime:   params.BackupTime.UTC().Format(time.RFC3339),
			FinishedTime: time.Now().UTC().Format(time.RFC3339),
		},

		// XtraBackup-specific fields
		FileName:        backupFileName,
		StreamMode:      xtrabackupStreamMode,
		SkipCompress:    !backupStorageCompress,
		Params:          xtrabackupBackupFlags,
		NumStripes:      int32(numStripes),
		StripeBlockSize: int32(xtrabackupStripeBlockSize),
		// builtin-specific field
		CompressionEngine: CompressionEngineName,
	}

	data, err := json.MarshalIndent(bm, "", "  ")
	if err != nil {
		return false, vterrors.Wrapf(err, "cannot JSON encode %v", backupManifestFileName)
	}
	if _, err := mwc.Write(data); err != nil {
		return false, vterrors.Wrapf(err, "cannot write %v", backupManifestFileName)
	}

	params.Logger.Infof("Backup completed")
	return true, nil
}

func (be *XtrabackupEngine) backupFiles(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle, backupFileName string, numStripes int, flavor string) (replicationPosition mysql.Position, finalErr error) {

	backupProgram := path.Join(xtrabackupEnginePath, xtrabackupBinaryName)
	flagsToExec := []string{"--defaults-file=" + params.Cnf.Path,
		"--backup",
		"--socket=" + params.Cnf.SocketFile,
		"--slave-info",
		"--user=" + xtrabackupUser,
		"--target-dir=" + params.Cnf.TmpDir,
	}
	if xtrabackupStreamMode != "" {
		flagsToExec = append(flagsToExec, "--stream="+xtrabackupStreamMode)
	}
	if xtrabackupBackupFlags != "" {
		flagsToExec = append(flagsToExec, strings.Fields(xtrabackupBackupFlags)...)
	}

	// Create a cancellable Context for calls to bh.AddFile().
	// This allows us to decide later if we need to cancel an attempt to Close()
	// the file returned from AddFile(), since Close() itself does not accept a
	// Context value. We can't use a context.WithTimeout() here because that
	// would impose a timeout that starts counting right now, so it would
	// include the time spent uploading the file content. We only want to impose
	// a timeout on the final Close() step.
	addFilesCtx, cancelAddFiles := context.WithCancel(ctx)
	defer cancelAddFiles()
	destFiles, err := addStripeFiles(addFilesCtx, params, bh, backupFileName, numStripes)
	if err != nil {
		return replicationPosition, vterrors.Wrapf(err, "cannot create backup file %v", backupFileName)
	}
	defer func() {
		// Impose a timeout on the process of closing files.
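		// A watchdog goroutine (below) cancels addFilesCtx if the Close()
		// calls take longer than closeTimeout. In the normal case, the
		// function-level deferred cancelAddFiles() runs after this defer
		// (LIFO order) and stops the timer via addFilesCtx.Done().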
		go func() {
			timer := time.NewTimer(closeTimeout)

			select {
			case <-addFilesCtx.Done():
				timer.Stop()
				return
			case <-timer.C:
				params.Logger.Errorf("Timed out waiting for Close() on backup file to complete")
				// Cancelling the Context that was originally passed to bh.AddFile()
				// should hopefully cause Close() calls on the file that AddFile()
				// returned to abort. If the underlying implementation doesn't
				// respect cancellation of the AddFile() Context while inside
				// Close(), then we just hang because it's unsafe to return and
				// leave Close() running indefinitely in the background.
				cancelAddFiles()
			}
		}()

		filename := backupFileName
		for i, file := range destFiles {
			if numStripes > 1 {
				filename = stripeFileName(backupFileName, i)
			}
			closeFile(file, filename, params.Logger, &finalErr)
		}
	}()

	backupCmd := exec.CommandContext(ctx, backupProgram, flagsToExec...)
	backupOut, err := backupCmd.StdoutPipe()
	if err != nil {
		return replicationPosition, vterrors.Wrap(err, "cannot create stdout pipe")
	}
	backupErr, err := backupCmd.StderrPipe()
	if err != nil {
		return replicationPosition, vterrors.Wrap(err, "cannot create stderr pipe")
	}

	destWriters := []io.Writer{}
	destBuffers := []*bufio.Writer{}
	destCompressors := []io.WriteCloser{}
	for _, file := range destFiles {
		buffer := bufio.NewWriterSize(file, writerBufferSize)
		destBuffers = append(destBuffers, buffer)
		writer := io.Writer(buffer)

		// Create the gzip compression pipe, if necessary.
		if backupStorageCompress {
			var compressor io.WriteCloser

			if ExternalCompressorCmd != "" {
				compressor, err = newExternalCompressor(ctx, ExternalCompressorCmd, writer, params.Logger)
			} else {
				compressor, err = newBuiltinCompressor(CompressionEngineName, writer, params.Logger)
			}
			if err != nil {
				return replicationPosition, vterrors.Wrap(err, "can't create compressor")
			}

			writer = compressor
			destCompressors = append(destCompressors, compressor)
		}

		destWriters = append(destWriters, writer)
	}

	if err = backupCmd.Start(); err != nil {
		return replicationPosition, vterrors.Wrap(err, "unable to start backup")
	}

	// Read stderr in the background, so we can log progress as xtrabackup runs.
	// Also save important lines of the output so we can parse it later to find
	// the replication position. Note that if we don't read stderr as we go, the
	// xtrabackup process gets blocked when the write buffer fills up.
	stderrBuilder := &strings.Builder{}
	posBuilder := &strings.Builder{}
	stderrDone := make(chan struct{})
	go func() {
		defer close(stderrDone)

		scanner := bufio.NewScanner(backupErr)
		capture := false
		for scanner.Scan() {
			line := scanner.Text()
			params.Logger.Infof("xtrabackup stderr: %s", line)
			// Keep the stderr output so it can be included in the error
			// message if xtrabackup fails. (The original code declared
			// stderrBuilder but never wrote to it, so the error message
			// below was always empty.)
			fmt.Fprintln(stderrBuilder, line)

			// Wait until we see the first line of the binlog position.
			// Then capture all subsequent lines. We need multiple lines since
			// the value we're looking for has newlines in it.
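			// The marker line contains "MySQL binlog position"; everything
			// from it onward is kept, and findReplicationPosition later
			// extracts the value matched by `GTID of the last change '...'`.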
			if !capture {
				if !strings.Contains(line, "MySQL binlog position") {
					continue
				}
				capture = true
			}
			fmt.Fprintln(posBuilder, line)
		}
		if err := scanner.Err(); err != nil {
			params.Logger.Errorf("error reading from xtrabackup stderr: %v", err)
		}
	}()

	// Copy from the stream output to destination file (optional gzip)
	blockSize := int64(xtrabackupStripeBlockSize)
	if blockSize < 1024 {
		// Enforce minimum block size.
		blockSize = 1024
	}
	// Add a buffer in front of the raw stdout pipe so io.CopyN() can use the
	// buffered reader's WriteTo() method instead of allocating a new buffer
	// every time.
	backupOutBuf := bufio.NewReaderSize(backupOut, int(blockSize))
	if _, err := copyToStripes(destWriters, backupOutBuf, blockSize); err != nil {
		return replicationPosition, vterrors.Wrap(err, "cannot copy output from xtrabackup command")
	}

	// Close the compressors to flush them. After that, all data has been sent to the buffers.
	for _, compressor := range destCompressors {
		if err := compressor.Close(); err != nil {
			return replicationPosition, vterrors.Wrap(err, "cannot close compressor")
		}
	}

	// Flush the buffers to finish writing to the destinations.
	for _, buffer := range destBuffers {
		if err = buffer.Flush(); err != nil {
			return replicationPosition, vterrors.Wrapf(err, "cannot flush destination: %v", backupFileName)
		}
	}

	// Wait for the stderr scanner to stop.
	<-stderrDone
	// Get the final stderr output.
	stderrOutput := stderrBuilder.String()

	if err := backupCmd.Wait(); err != nil {
		return replicationPosition, vterrors.Wrapf(err, "xtrabackup failed with error. Output=%s", stderrOutput)
	}

	posOutput := posBuilder.String()
	replicationPosition, rerr := findReplicationPosition(posOutput, flavor, params.Logger)
	if rerr != nil {
		return replicationPosition, vterrors.Wrap(rerr, "backup failed trying to find replication position")
	}

	return replicationPosition, nil
}

// ExecuteRestore restores from a backup. Any error is returned.
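// It reads the backup's MANIFEST to learn which file(s) to fetch and how they
// were encoded, extracts the archive into a temporary directory, and then runs
// xtrabackup's --prepare and --move-back steps to put the data files in their
// final locations (see restoreFromBackup below).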
func (be *XtrabackupEngine) ExecuteRestore(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle) (*BackupManifest, error) {

	var bm xtraBackupManifest

	if err := getBackupManifestInto(ctx, bh, &bm); err != nil {
		return nil, err
	}

	// mark restore as in progress
	if err := createStateFile(params.Cnf); err != nil {
		return nil, err
	}

	if err := prepareToRestore(ctx, params.Cnf, params.Mysqld, params.Logger); err != nil {
		return nil, err
	}

	// copy / extract files
	params.Logger.Infof("Restore: Extracting files from %v", bm.FileName)

	if err := be.restoreFromBackup(ctx, params.Cnf, bh, bm, params.Logger); err != nil {
		// don't delete the state file here because that is how we detect an interrupted restore
		return nil, err
	}
	// now find the replication position and return that
	params.Logger.Infof("Restore: returning replication position %v", bm.Position)
	return &bm.BackupManifest, nil
}

func (be *XtrabackupEngine) restoreFromBackup(ctx context.Context, cnf *Mycnf, bh backupstorage.BackupHandle, bm xtraBackupManifest, logger logutil.Logger) error {
	// First download the backup into a tmp dir and extract all the files.
	tempDir := fmt.Sprintf("%v/%v", cnf.TmpDir, time.Now().UTC().Format("xtrabackup-2006-01-02.150405"))
	// create tempDir
	if err := os.MkdirAll(tempDir, os.ModePerm); err != nil {
		return err
	}
	// delete tempDir once we are done
	defer func(dir string, l logutil.Logger) {
		err := os.RemoveAll(dir)
		if err != nil {
			l.Errorf("error deleting tempDir(%v): %v", dir, err)
		}
	}(tempDir, logger)

	// As an optimization, we replace pargzip with pgzip so that
	// newBuiltinDecompressor doesn't have to compare and print a warning for
	// every file. Since newBuiltinDecompressor is a helper method and does not
	// hold any state, it was hard to do this in that method itself.
	if bm.CompressionEngine == PargzipCompressor {
		logger.Warningf(`engine "pargzip" doesn't support decompression, using "pgzip" instead`)
		bm.CompressionEngine = PgzipCompressor
		defer func() {
			bm.CompressionEngine = PargzipCompressor
		}()
	}

	if err := be.extractFiles(ctx, logger, bh, bm, tempDir); err != nil {
		logger.Errorf("error extracting backup files: %v", err)
		return err
	}

	// copy / extract files
	logger.Infof("Restore: Preparing the extracted files")
	// prepare the backup
	restoreProgram := path.Join(xtrabackupEnginePath, xtrabackupBinaryName)
	flagsToExec := []string{"--defaults-file=" + cnf.Path,
		"--prepare",
		"--target-dir=" + tempDir,
	}
	if xtrabackupPrepareFlags != "" {
		flagsToExec = append(flagsToExec, strings.Fields(xtrabackupPrepareFlags)...)
	}
	prepareCmd := exec.CommandContext(ctx, restoreProgram, flagsToExec...)
	prepareOut, err := prepareCmd.StdoutPipe()
	if err != nil {
		return vterrors.Wrap(err, "cannot create stdout pipe")
	}
	prepareErr, err := prepareCmd.StderrPipe()
	if err != nil {
		return vterrors.Wrap(err, "cannot create stderr pipe")
	}
	if err := prepareCmd.Start(); err != nil {
		return vterrors.Wrap(err, "can't start prepare step")
	}

	// Read stdout/stderr in the background and send each line to the logger.
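	// As in backupFiles, draining both pipes while the command runs also
	// prevents xtrabackup from blocking once a pipe buffer fills up.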
	prepareWg := &sync.WaitGroup{}
	prepareWg.Add(2)
	go scanLinesToLogger("prepare stdout", prepareOut, logger, prepareWg.Done)
	go scanLinesToLogger("prepare stderr", prepareErr, logger, prepareWg.Done)
	prepareWg.Wait()

	// Get exit status.
	if err := prepareCmd.Wait(); err != nil {
		return vterrors.Wrap(err, "prepare step failed")
	}

	// then move-back
	logger.Infof("Restore: Move extracted and prepared files to final locations")

	flagsToExec = []string{"--defaults-file=" + cnf.Path,
		"--move-back",
		"--target-dir=" + tempDir,
	}
	movebackCmd := exec.CommandContext(ctx, restoreProgram, flagsToExec...)
	movebackOut, err := movebackCmd.StdoutPipe()
	if err != nil {
		return vterrors.Wrap(err, "cannot create stdout pipe")
	}
	movebackErr, err := movebackCmd.StderrPipe()
	if err != nil {
		return vterrors.Wrap(err, "cannot create stderr pipe")
	}
	if err := movebackCmd.Start(); err != nil {
		return vterrors.Wrap(err, "can't start move-back step")
	}

	// Read stdout/stderr in the background and send each line to the logger.
	movebackWg := &sync.WaitGroup{}
	movebackWg.Add(2)
	go scanLinesToLogger("move-back stdout", movebackOut, logger, movebackWg.Done)
	go scanLinesToLogger("move-back stderr", movebackErr, logger, movebackWg.Done)
	movebackWg.Wait()

	// Get exit status.
	if err := movebackCmd.Wait(); err != nil {
		return vterrors.Wrap(err, "move-back step failed")
	}

	return nil
}

// extractFiles extracts all the files from the backup archive.
func (be *XtrabackupEngine) extractFiles(ctx context.Context, logger logutil.Logger, bh backupstorage.BackupHandle, bm xtraBackupManifest, tempDir string) error {
	// Pull details from the MANIFEST where available, so we can still restore
	// backups taken with different flags. Some fields were not always present,
	// so if necessary we default to the flag values.
	compressed := !bm.SkipCompress
	streamMode := bm.StreamMode
	if streamMode == "" {
		streamMode = xtrabackupStreamMode
	}
	baseFileName := bm.FileName
	if baseFileName == "" {
		baseFileName = be.backupFileName()
	}

	logger.Infof("backup file name: %s", baseFileName)
	// Open the source files for reading.
	srcFiles, err := readStripeFiles(ctx, bh, baseFileName, int(bm.NumStripes), logger)
	if err != nil {
		return vterrors.Wrapf(err, "cannot open backup file %v", baseFileName)
	}
	defer func() {
		for _, file := range srcFiles {
			file.Close()
		}
	}()

	srcReaders := []io.Reader{}
	srcDecompressors := []io.ReadCloser{}
	for _, file := range srcFiles {
		reader := io.Reader(file)

		// Create the decompressor if needed.
		if compressed {
			var decompressor io.ReadCloser
			deCompressionEngine := bm.CompressionEngine
			if deCompressionEngine == "" {
				// For backward compatibility: if the manifest is from an N-1
				// binary, we assign the default compression engine.
				deCompressionEngine = PgzipCompressor
			}
			if ExternalDecompressorCmd != "" {
				if deCompressionEngine == ExternalCompressor {
					deCompressionEngine = ExternalDecompressorCmd
					decompressor, err = newExternalDecompressor(ctx, deCompressionEngine, reader, logger)
				} else {
					decompressor, err = newBuiltinDecompressor(deCompressionEngine, reader, logger)
				}
			} else {
				if deCompressionEngine == ExternalCompressor {
					return fmt.Errorf("%w %q", errUnsupportedCompressionEngine, ExternalCompressor)
				}
				decompressor, err = newBuiltinDecompressor(deCompressionEngine, reader, logger)
			}
			if err != nil {
				return vterrors.Wrap(err, "can't create decompressor")
			}
			srcDecompressors = append(srcDecompressors, decompressor)
			reader = decompressor
		}

		srcReaders = append(srcReaders, reader)
	}
	defer func() {
		for _, decompressor := range srcDecompressors {
			if cerr := decompressor.Close(); cerr != nil {
				logger.Errorf("failed to close decompressor: %v", cerr)
			}
		}
	}()

	reader := stripeReader(srcReaders, int64(bm.StripeBlockSize))

	switch streamMode {
	case streamModeTar:
		// now extract the files by running tar
		// (this errors out if we can't find tar)
		flagsToExec := []string{"-C", tempDir, "-xiv"}
		tarCmd := exec.CommandContext(ctx, "tar", flagsToExec...)
		logger.Infof("Executing tar cmd with flags %v", flagsToExec)
		tarCmd.Stdin = reader
		tarOut, err := tarCmd.StdoutPipe()
		if err != nil {
			return vterrors.Wrap(err, "cannot create stdout pipe")
		}
		tarErr, err := tarCmd.StderrPipe()
		if err != nil {
			return vterrors.Wrap(err, "cannot create stderr pipe")
		}
		if err := tarCmd.Start(); err != nil {
			return vterrors.Wrap(err, "can't start tar")
		}

		// Read stdout/stderr in the background and send each line to the logger.
		tarWg := &sync.WaitGroup{}
		tarWg.Add(2)
		go scanLinesToLogger("tar stdout", tarOut, logger, tarWg.Done)
		go scanLinesToLogger("tar stderr", tarErr, logger, tarWg.Done)
		tarWg.Wait()

		// Get exit status.
		if err := tarCmd.Wait(); err != nil {
			return vterrors.Wrap(err, "tar failed")
		}

	case xbstream:
		// now extract the files by running xbstream
		xbstreamProgram := path.Join(xtrabackupEnginePath, xbstream)
		flagsToExec := []string{"-C", tempDir, "-xv"}
		if xbstreamRestoreFlags != "" {
			flagsToExec = append(flagsToExec, strings.Fields(xbstreamRestoreFlags)...)
		}
		xbstreamCmd := exec.CommandContext(ctx, xbstreamProgram, flagsToExec...)
		logger.Infof("Executing xbstream cmd: %v %v", xbstreamProgram, flagsToExec)
		xbstreamCmd.Stdin = reader
		xbstreamOut, err := xbstreamCmd.StdoutPipe()
		if err != nil {
			return vterrors.Wrap(err, "cannot create stdout pipe")
		}
		xbstreamErr, err := xbstreamCmd.StderrPipe()
		if err != nil {
			return vterrors.Wrap(err, "cannot create stderr pipe")
		}
		if err := xbstreamCmd.Start(); err != nil {
			return vterrors.Wrap(err, "can't start xbstream")
		}

		// Read stdout/stderr in the background and send each line to the logger.
		xbstreamWg := &sync.WaitGroup{}
		xbstreamWg.Add(2)
		go scanLinesToLogger("xbstream stdout", xbstreamOut, logger, xbstreamWg.Done)
		go scanLinesToLogger("xbstream stderr", xbstreamErr, logger, xbstreamWg.Done)
		xbstreamWg.Wait()

		// Get exit status.
		if err := xbstreamCmd.Wait(); err != nil {
			return vterrors.Wrap(err, "xbstream failed")
		}
	default:
		return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "%v is not a valid value for xtrabackup_stream_mode, supported modes are tar and xbstream", streamMode)
	}
	return nil
}

var xtrabackupReplicationPositionRegexp = regexp.MustCompile(`GTID of the last change '([^']*)'`)

func findReplicationPosition(input, flavor string, logger logutil.Logger) (mysql.Position, error) {
	match := xtrabackupReplicationPositionRegexp.FindStringSubmatch(input)
	if match == nil || len(match) != 2 {
		return mysql.Position{}, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "couldn't find replication position in xtrabackup stderr output")
	}
	position := match[1]
	// Remove all spaces, tabs, and newlines.
	position = strings.ReplaceAll(position, " ", "")
	position = strings.ReplaceAll(position, "\t", "")
	position = strings.ReplaceAll(position, "\n", "")
	logger.Infof("Found position: %v", position)
	if position == "" {
		return mysql.Position{}, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "empty replication position from xtrabackup")
	}

	// flavor is required to parse a string into a mysql.Position
	replicationPosition, err := mysql.ParsePosition(flavor, position)
	if err != nil {
		return mysql.Position{}, vterrors.Wrapf(err, "can't parse replication position from xtrabackup: %v", position)
	}
	return replicationPosition, nil
}

// scanLinesToLogger scans full lines from the given Reader and sends them to
// the given Logger until EOF.
func scanLinesToLogger(prefix string, reader io.Reader, logger logutil.Logger, doneFunc func()) {
	defer doneFunc()

	scanner := bufio.NewScanner(reader)
	for scanner.Scan() {
		line := scanner.Text()
		logger.Infof("%s: %s", prefix, line)
	}
	if err := scanner.Err(); err != nil {
		// This is usually run in a background goroutine, so there's no point
		// returning an error. Just log it.
		logger.Warningf("error scanning lines from %s: %v", prefix, err)
	}
}

// stripeFileName returns the name of the index-th stripe file for the given
// base name, e.g. "backup.xbstream.gz-002".
func stripeFileName(baseFileName string, index int) string {
	return fmt.Sprintf("%s-%03d", baseFileName, index)
}

func addStripeFiles(ctx context.Context, params BackupParams, backupHandle backupstorage.BackupHandle, baseFileName string, numStripes int) ([]io.WriteCloser, error) {
	// Compute the total size of all files we will back up.
	// We delegate the actual backing up to xtrabackup, which streams
	// the files as a single archive (tar / xbstream), which might
	// further be compressed using gzip.
	// This approximate total size is passed in to AddFile so that
	// storage plugins can make appropriate choices for parameters
	// like partSize in multi-part uploads.
	_, totalSize, err := findFilesToBackup(params.Cnf)
	if err != nil {
		return nil, err
	}

	if numStripes <= 1 {
		// No striping.
		file, err := backupHandle.AddFile(ctx, baseFileName, totalSize)
		return []io.WriteCloser{file}, err
	}

	files := []io.WriteCloser{}
	for i := 0; i < numStripes; i++ {
		filename := stripeFileName(baseFileName, i)
		params.Logger.Infof("Opening backup stripe file %v", filename)
		file, err := backupHandle.AddFile(ctx, filename, totalSize/int64(numStripes))
		if err != nil {
			// Close any files we already opened and clear them from the result.
			for _, file := range files {
				if err := file.Close(); err != nil {
					params.Logger.Warningf("error closing backup stripe file: %v", err)
				}
			}
			return nil, err
		}
		files = append(files, file)
	}

	return files, nil
}

func readStripeFiles(ctx context.Context, backupHandle backupstorage.BackupHandle, baseFileName string, numStripes int, logger logutil.Logger) ([]io.ReadCloser, error) {
	if numStripes <= 1 {
		// No striping.
		file, err := backupHandle.ReadFile(ctx, baseFileName)
		return []io.ReadCloser{file}, err
	}

	files := []io.ReadCloser{}
	for i := 0; i < numStripes; i++ {
		file, err := backupHandle.ReadFile(ctx, stripeFileName(baseFileName, i))
		if err != nil {
			// Close any files we already opened and clear them from the result.
			for _, file := range files {
				if err := file.Close(); err != nil {
					logger.Warningf("error closing backup stripe file: %v", err)
				}
			}
			return nil, err
		}
		files = append(files, file)
	}

	return files, nil
}

func copyToStripes(writers []io.Writer, reader io.Reader, blockSize int64) (written int64, err error) {
	if len(writers) == 1 {
		// Not striped.
		return io.Copy(writers[0], reader)
	}

	// Read blocks from the source and round-robin them to the destination writers.
	// Since we put a buffer in front of the destination file, and pargzip has its
	// own buffer as well, we are writing into a buffer either way (whether a
	// compressor is in the chain or not). That means these writes should not
	// block often, so we shouldn't need separate goroutines here.
	destIndex := 0
	for {
		// Copy blockSize bytes to this writer before rotating to the next one.
		// The only acceptable reason for copying less than blockSize bytes is EOF.
		n, err := io.CopyN(writers[destIndex], reader, blockSize)
		written += n
		if err == io.EOF {
			// We're done.
			return written, nil
		}
		if err != nil {
			// If we failed to copy exactly blockSize bytes for any reason other
			// than EOF, we must abort.
			return written, err
		}

		// Rotate to the next writer.
		destIndex++
		if destIndex == len(writers) {
			destIndex = 0
		}
	}
}

func stripeReader(readers []io.Reader, blockSize int64) io.Reader {
	if len(readers) == 1 {
		// No striping.
		return readers[0]
	}

	// Make a pipe to convert our overall Writer into a Reader.
	// We will launch a goroutine to write to the write half of the pipe,
	// and return the read half to the caller.
	reader, writer := io.Pipe()

	go func() {
		// Read blocks from each source in round-robin order and send them to the pipe.
		// When using pgzip, there is already a read-ahead goroutine for every
		// source, so we don't need to launch one for each source.
		// TODO: See if we need to add read-ahead goroutines for the case when
		// compression is not enabled, in order to get any benefit to restore
		// parallelism from data striping.
		srcIndex := 0
		for {
			// Copy blockSize bytes from this reader before rotating to the next one.
			// The only acceptable reason for copying less than blockSize bytes is EOF.
			n, err := io.CopyN(writer, readers[srcIndex], blockSize)
			if err != nil {
				// If we failed to copy exactly blockSize bytes for any
				// reason other than EOF, we must abort.
				if err != io.EOF {
					writer.CloseWithError(err)
					return
				}

				// If we hit EOF after copying less than blockSize bytes from
				// this reader, we must be done.
				if n < blockSize {
					// Close the write half so the read half gets EOF.
					writer.Close()
					return
				}
				// If we hit EOF after copying exactly blockSize bytes, then we
				// need to keep checking the rest of the stripes until one of
				// them returns EOF with n < blockSize.
			}

			// Rotate to the next reader.
			srcIndex++
			if srcIndex == len(readers) {
				srcIndex = 0
			}
		}
	}()

	return reader
}

// ShouldDrainForBackup satisfies the BackupEngine interface.
// xtrabackup can run while the tablet is serving, hence false.
func (be *XtrabackupEngine) ShouldDrainForBackup() bool {
	return false
}

func init() {
	BackupRestoreEngineMap[xtrabackupEngineName] = &XtrabackupEngine{}
}
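// Illustrative usage (a sketch, not taken from this file): the flags
// registered above configure this engine, while the engine itself is
// typically selected with the --backup_engine_implementation flag defined
// elsewhere in this package. A tablet taking striped, compressed xtrabackup
// backups might be started with flags such as:
//
//	vttablet ... \
//	  --backup_engine_implementation=xtrabackup \
//	  --xtrabackup_user=vt_dba \
//	  --xtrabackup_stream_mode=xbstream \
//	  --xtrabackup_stripes=4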