github.com/unigraph-dev/dgraph@v1.1.1-0.20200923154953-8b52b426f765/ee/backup/tests/filesystem/backup_test.go

/*
 * Copyright 2018 Dgraph Labs, Inc. and Contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"math"
	"net/http"
	"net/url"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/dgraph-io/dgo"
	"github.com/dgraph-io/dgo/protos/api"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc"

	"github.com/dgraph-io/dgraph/ee/backup"
	"github.com/dgraph-io/dgraph/testutil"
	"github.com/dgraph-io/dgraph/x"
)

var (
	backupDir     = "./data/backups"
	copyBackupDir = "./data/backups_copy"
	restoreDir    = "./data/restore"
	testDirs      = []string{restoreDir}

	alphaBackupDir = "/data/backups"

	alphaContainers = []string{
		"alpha1",
		"alpha2",
		"alpha3",
	}
)

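// TestBackupFilesystem exercises the backup/restore cycle against a local
// filesystem destination: an initial full backup, two incremental backups, and
// a second (forced) full backup, restoring and verifying the data after each step.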
func TestBackupFilesystem(t *testing.T) {
	conn, err := grpc.Dial(testutil.SockAddr, grpc.WithInsecure())
	require.NoError(t, err)
	dg := dgo.NewDgraphClient(api.NewDgraphClient(conn))

	// Add initial data.
	ctx := context.Background()
	require.NoError(t, dg.Alter(ctx, &api.Operation{DropAll: true}))
	require.NoError(t, dg.Alter(ctx, &api.Operation{Schema: `movie: string .`}))
	original, err := dg.NewTxn().Mutate(ctx, &api.Mutation{
		CommitNow: true,
		SetNquads: []byte(`
			<_:x1> <movie> "BIRDS MAN OR (THE UNEXPECTED VIRTUE OF IGNORANCE)" .
			<_:x2> <movie> "Spotlight" .
			<_:x3> <movie> "Moonlight" .
			<_:x4> <movie> "THE SHAPE OF WATERLOO" .
			<_:x5> <movie> "BLACK PUNTER" .
		`),
	})
	require.NoError(t, err)
	t.Logf("--- Original uid mapping: %+v\n", original.Uids)

	// Move tablet to group 1 to avoid messes later.
	_, err = http.Get("http://" + testutil.SockAddrZeroHttp + "/moveTablet?tablet=movie&group=1")
	require.NoError(t, err)

	// After the move, pause briefly to give Zero a chance to reach quorum.
	t.Log("Pausing to let Zero move the tablet...")
	moveOk := false
	for retry := 5; retry > 0; retry-- {
		time.Sleep(3 * time.Second)
		state, err := testutil.GetState()
		require.NoError(t, err)
		if _, ok := state.Groups["1"].Tablets["movie"]; ok {
			moveOk = true
			break
		}
	}
	require.True(t, moveOk)

	// Set up the test directories.
	dirSetup()

	// Send backup request.
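	// Each backup run is expected to add one dgraph.* directory containing one
	// .backup file per group, so the (files, dirs) counts passed to runBackup
	// below grow by (3, 1) per backup.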
	_ = runBackup(t, 3, 1)
	restored := runRestore(t, copyBackupDir, "", math.MaxUint64)

	checks := []struct {
		blank, expected string
	}{
		{blank: "x1", expected: "BIRDS MAN OR (THE UNEXPECTED VIRTUE OF IGNORANCE)"},
		{blank: "x2", expected: "Spotlight"},
		{blank: "x3", expected: "Moonlight"},
		{blank: "x4", expected: "THE SHAPE OF WATERLOO"},
		{blank: "x5", expected: "BLACK PUNTER"},
	}
	for _, check := range checks {
		require.EqualValues(t, check.expected, restored[original.Uids[check.blank]])
	}

	// Add more data for the incremental backup.
	incr1, err := dg.NewTxn().Mutate(ctx, &api.Mutation{
		CommitNow: true,
		SetNquads: []byte(fmt.Sprintf(`
			<%s> <movie> "Birdman or (The Unexpected Virtue of Ignorance)" .
			<%s> <movie> "The Shape of Waterloo" .
		`, original.Uids["x1"], original.Uids["x4"])),
	})
	t.Logf("%+v", incr1)
	require.NoError(t, err)

	// Perform first incremental backup.
	_ = runBackup(t, 6, 2)
	restored = runRestore(t, copyBackupDir, "", incr1.Txn.CommitTs)

	checks = []struct {
		blank, expected string
	}{
		{blank: "x1", expected: "Birdman or (The Unexpected Virtue of Ignorance)"},
		{blank: "x4", expected: "The Shape of Waterloo"},
	}
	for _, check := range checks {
		require.EqualValues(t, check.expected, restored[original.Uids[check.blank]])
	}

	// Add more data for a second incremental backup.
	incr2, err := dg.NewTxn().Mutate(ctx, &api.Mutation{
		CommitNow: true,
		SetNquads: []byte(fmt.Sprintf(`
				<%s> <movie> "The Shape of Water" .
				<%s> <movie> "The Black Panther" .
			`, original.Uids["x4"], original.Uids["x5"])),
	})
	require.NoError(t, err)

	// Perform second incremental backup.
	_ = runBackup(t, 9, 3)
	restored = runRestore(t, copyBackupDir, "", incr2.Txn.CommitTs)

	checks = []struct {
		blank, expected string
	}{
		{blank: "x4", expected: "The Shape of Water"},
		{blank: "x5", expected: "The Black Panther"},
	}
	for _, check := range checks {
		require.EqualValues(t, check.expected, restored[original.Uids[check.blank]])
	}

	// Add more data for a second full backup.
	incr3, err := dg.NewTxn().Mutate(ctx, &api.Mutation{
		CommitNow: true,
		SetNquads: []byte(fmt.Sprintf(`
				<%s> <movie> "El laberinto del fauno" .
				<%s> <movie> "Black Panther 2" .
			`, original.Uids["x4"], original.Uids["x5"])),
	})
	require.NoError(t, err)

	// Perform second full backup.
	dirs := runBackupInternal(t, true, 12, 4)
	restored = runRestore(t, copyBackupDir, "", incr3.Txn.CommitTs)

	// Check that all values were restored to their most recent versions.
	checks = []struct {
		blank, expected string
	}{
		{blank: "x1", expected: "Birdman or (The Unexpected Virtue of Ignorance)"},
		{blank: "x2", expected: "Spotlight"},
		{blank: "x3", expected: "Moonlight"},
		{blank: "x4", expected: "El laberinto del fauno"},
		{blank: "x5", expected: "Black Panther 2"},
	}
	for _, check := range checks {
		require.EqualValues(t, check.expected, restored[original.Uids[check.blank]])
	}

	// Remove the full backup dirs and verify restore catches the error.
	require.NoError(t, os.RemoveAll(dirs[0]))
	require.NoError(t, os.RemoveAll(dirs[3]))
	runFailingRestore(t, copyBackupDir, "", incr3.Txn.CommitTs)

	// Clean up test directories.
	dirCleanup()
}

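// runBackup requests a backup without forcing a full backup and verifies the
// expected number of backup files and directories.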
func runBackup(t *testing.T, numExpectedFiles, numExpectedDirs int) []string {
	return runBackupInternal(t, false, numExpectedFiles, numExpectedDirs)
}

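// runBackupInternal sends a backup request to the alpha's /admin/backup
// endpoint, copies the results out of the container, and verifies the counts
// of .backup files, backup directories, and manifests. It returns the list of
// backup directories found.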
func runBackupInternal(t *testing.T, forceFull bool, numExpectedFiles,
	numExpectedDirs int) []string {
	forceFullStr := "false"
	if forceFull {
		forceFullStr = "true"
	}

	resp, err := http.PostForm("http://localhost:8180/admin/backup", url.Values{
		"destination": []string{alphaBackupDir},
		"force_full":  []string{forceFullStr},
	})
	require.NoError(t, err)
	defer resp.Body.Close()
	buf, err := ioutil.ReadAll(resp.Body)
	require.NoError(t, err)
	require.Contains(t, string(buf), "Backup completed.")

	// Verify that the backup created the expected number of files and directories.
	copyToLocalFs()

	files := x.WalkPathFunc(copyBackupDir, func(path string, isdir bool) bool {
		return !isdir && strings.HasSuffix(path, ".backup")
	})
	require.Equal(t, numExpectedFiles, len(files))

	dirs := x.WalkPathFunc(copyBackupDir, func(path string, isdir bool) bool {
		return isdir && strings.HasPrefix(path, "data/backups_copy/dgraph.")
	})
	require.Equal(t, numExpectedDirs, len(dirs))

	manifests := x.WalkPathFunc(copyBackupDir, func(path string, isdir bool) bool {
		return !isdir && strings.Contains(path, "manifest.json")
	})
	require.Equal(t, numExpectedDirs, len(manifests))

	return dirs
}

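// runRestore wipes the restore directory, restores the backup series found at
// backupLocation, and returns the restored movie values read at commitTs.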
func runRestore(t *testing.T, backupLocation, lastDir string, commitTs uint64) map[string]string {
	// Recreate the restore directory to make sure there's no previous data when
	// calling restore.
	require.NoError(t, os.RemoveAll(restoreDir))
	require.NoError(t, os.MkdirAll(restoreDir, os.ModePerm))

	t.Logf("--- Restoring from: %q", backupLocation)
	_, err := backup.RunRestore(restoreDir, backupLocation, lastDir)
	require.NoError(t, err)

	restored, err := testutil.GetPValues(restoreDir+"/p1", "movie", commitTs)
	require.NoError(t, err)
	t.Logf("--- Restored values: %+v\n", restored)

	return restored
}

// runFailingRestore is like runRestore but expects an error during restore.
func runFailingRestore(t *testing.T, backupLocation, lastDir string, commitTs uint64) {
	// Recreate the restore directory to make sure there's no previous data when
	// calling restore.
	require.NoError(t, os.RemoveAll(restoreDir))
	require.NoError(t, os.MkdirAll(restoreDir, os.ModePerm))

	_, err := backup.RunRestore(restoreDir, backupLocation, lastDir)
	require.Error(t, err)
	require.Contains(t, err.Error(), "expected a BackupNum value of 1")
}

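// dirSetup cleans up leftovers from previous runs, then creates the local test
// directories and the backup directory inside each alpha container.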
func dirSetup() {
	// Clean up data from previous runs.
	dirCleanup()

	for _, dir := range testDirs {
		x.Check(os.MkdirAll(dir, os.ModePerm))
	}

	for _, alpha := range alphaContainers {
		cmd := []string{"mkdir", "-p", alphaBackupDir}
		x.Check(testutil.DockerExec(alpha, cmd...))
	}
}

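// dirCleanup removes the local restore and backup-copy directories and deletes
// any backup output left inside the alpha container.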
func dirCleanup() {
	x.Check(os.RemoveAll(restoreDir))
	x.Check(os.RemoveAll(copyBackupDir))

	cmd := []string{"bash", "-c", "rm -rf /data/backups/dgraph.*"}
	x.Check(testutil.DockerExec(alphaContainers[0], cmd...))
}

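// copyToLocalFs copies the backup files out of the alpha1 container into
// copyBackupDir so they can be inspected without root permissions.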
func copyToLocalFs() {
	// The original backup files are not accessible because docker creates all files in
	// the shared volume as the root user. This restriction is circumvented by using
	// "docker cp" to create a copy that is not owned by the root user.
	x.Check(os.RemoveAll(copyBackupDir))
	srcPath := "alpha1:/data/backups"
	x.Check(testutil.DockerCp(srcPath, copyBackupDir))
}