github.com/mckael/restic@v0.8.3/internal/restic/testing.go

package restic

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"math/rand"
	"testing"
	"time"

	"github.com/restic/restic/internal/errors"

	"github.com/restic/chunker"
)

// fakeFile returns a reader which yields deterministic pseudo-random data.
func fakeFile(t testing.TB, seed, size int64) io.Reader {
	return io.LimitReader(NewRandReader(rand.New(rand.NewSource(seed))), size)
}

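// fakeFileSystem generates fake files and directory trees and stores them as
// blobs in repo. knownBlobs tracks blobs that have already been saved;
// duplication is the probability that a known blob is stored again anyway.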
type fakeFileSystem struct {
	t           testing.TB
	repo        Repository
	knownBlobs  IDSet
	duplication float32
	buf         []byte
	chunker     *chunker.Chunker
}

// saveFile reads from rd and saves the blobs in the repository. The list of
// IDs is returned.
func (fs *fakeFileSystem) saveFile(ctx context.Context, rd io.Reader) (blobs IDs) {
	if fs.buf == nil {
		fs.buf = make([]byte, chunker.MaxSize)
	}

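	// A single chunker is reused for all files and reset for each new reader.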
	if fs.chunker == nil {
		fs.chunker = chunker.New(rd, fs.repo.Config().ChunkerPolynomial)
	} else {
		fs.chunker.Reset(rd, fs.repo.Config().ChunkerPolynomial)
	}

	blobs = IDs{}
	for {
		chunk, err := fs.chunker.Next(fs.buf)
		if errors.Cause(err) == io.EOF {
			break
		}

		if err != nil {
			fs.t.Fatalf("unable to read next chunk: %v", err)
		}

		id := Hash(chunk.Data)
		if !fs.blobIsKnown(id, DataBlob) {
			_, err := fs.repo.SaveBlob(ctx, DataBlob, chunk.Data, id)
			if err != nil {
				fs.t.Fatalf("error saving chunk: %v", err)
			}

			fs.knownBlobs.Insert(id)
		}

		blobs = append(blobs, id)
	}

	return blobs
}

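// Limits for the generated fake data: the maximum size of a fake file in
// bytes, the number of distinct seeds used for files and subtrees, and the
// maximum number of nodes in a generated tree.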
const (
	maxFileSize = 1500000
	maxSeed     = 32
	maxNodes    = 32
)

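// treeIsKnown serializes tree and reports whether the resulting blob is
// already known. It also returns the serialized tree and its ID.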
func (fs *fakeFileSystem) treeIsKnown(tree *Tree) (bool, []byte, ID) {
	data, err := json.Marshal(tree)
	if err != nil {
		fs.t.Fatalf("json.Marshal(tree) returned error: %v", err)
		return false, nil, ID{}
	}
	data = append(data, '\n')

	id := Hash(data)
	return fs.blobIsKnown(id, TreeBlob), data, id
}

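// blobIsKnown reports whether the blob id is already present in the known
// set or in the repository index. With probability duplication it returns
// false even for known blobs, so that some blobs are saved more than once.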
func (fs *fakeFileSystem) blobIsKnown(id ID, t BlobType) bool {
	if rand.Float32() < fs.duplication {
		return false
	}

	if fs.knownBlobs.Has(id) {
		return true
	}

	if fs.repo.Index().Has(id, t) {
		return true
	}

	fs.knownBlobs.Insert(id)
	return false
}

// saveTree saves a tree of fake files in the repo and returns the ID.
func (fs *fakeFileSystem) saveTree(ctx context.Context, seed int64, depth int) ID {
	rnd := rand.NewSource(seed)
	numNodes := int(rnd.Int63() % maxNodes)

	var tree Tree
	for i := 0; i < numNodes; i++ {

		// randomly select the type of the node, either tree (p = 1/4) or file (p = 3/4).
		if depth > 1 && rnd.Int63()%4 == 0 {
			treeSeed := rnd.Int63() % maxSeed
			id := fs.saveTree(ctx, treeSeed, depth-1)

			node := &Node{
				Name:    fmt.Sprintf("dir-%v", treeSeed),
				Type:    "dir",
				Mode:    0755,
				Subtree: &id,
			}

			tree.Nodes = append(tree.Nodes, node)
			continue
		}

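		// Derive the file's seed and size deterministically so that the same
		// seed always produces the same fake file.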
		fileSeed := rnd.Int63() % maxSeed
		fileSize := (maxFileSize / maxSeed) * fileSeed

		node := &Node{
			Name: fmt.Sprintf("file-%v", fileSeed),
			Type: "file",
			Mode: 0644,
			Size: uint64(fileSize),
		}

		node.Content = fs.saveFile(ctx, fakeFile(fs.t, fileSeed, fileSize))
		tree.Nodes = append(tree.Nodes, node)
	}

	known, buf, id := fs.treeIsKnown(&tree)
	if known {
		return id
	}

	_, err := fs.repo.SaveBlob(ctx, TreeBlob, buf, id)
	if err != nil {
		fs.t.Fatal(err)
	}

	return id
}

// TestCreateSnapshot creates a snapshot filled with fake data. The fake data
// is generated deterministically from the timestamp `at`, which is also used
// as the snapshot's timestamp. The depth of the generated tree can be
// specified with the parameter depth. The parameter duplication is the
// probability that a blob will be saved again even though it is already
// present in the repository.
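//
// A minimal usage sketch (non-authoritative; it assumes a test repository
// helper such as repository.TestRepository from restic's internal/repository
// package is available to create a throwaway repository):
//
//	repo, cleanup := repository.TestRepository(t)
//	defer cleanup()
//	sn := restic.TestCreateSnapshot(t, repo, time.Unix(1469960361, 0), 2, 0)
//	t.Logf("created fake snapshot %v", sn.ID().Str())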
func TestCreateSnapshot(t testing.TB, repo Repository, at time.Time, depth int, duplication float32) *Snapshot {
	seed := at.Unix()
	t.Logf("create fake snapshot at %s with seed %d", at, seed)

	fakedir := fmt.Sprintf("fakedir-at-%v", at.Format("2006-01-02 15:04:05"))
	snapshot, err := NewSnapshot([]string{fakedir}, []string{"test"}, "foo", time.Now())
	if err != nil {
		t.Fatal(err)
	}
	snapshot.Time = at

	fs := fakeFileSystem{
		t:           t,
		repo:        repo,
		knownBlobs:  NewIDSet(),
		duplication: duplication,
	}

	treeID := fs.saveTree(context.TODO(), seed, depth)
	snapshot.Tree = &treeID

	id, err := repo.SaveJSONUnpacked(context.TODO(), SnapshotFile, snapshot)
	if err != nil {
		t.Fatal(err)
	}

	snapshot.id = &id

	t.Logf("saved snapshot %v", id.Str())

	err = repo.Flush(context.Background())
	if err != nil {
		t.Fatal(err)
	}

	err = repo.SaveIndex(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	return snapshot
}

// TestParseID parses s as an ID and panics if that fails.
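// It is intended for hard-coded IDs in tests, for example (any valid
// 64-character hex string works, since IDs are SHA-256 hashes):
//
//	id := restic.TestParseID("c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")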
func TestParseID(s string) ID {
	id, err := ParseID(s)
	if err != nil {
		panic(fmt.Sprintf("unable to parse string %q as ID: %v", s, err))
	}

	return id
}