github.com/demonoid81/containerd@v1.3.4/content/local/store_test.go (about)

     1  /*
     2     Copyright The containerd Authors.
     3  
     4     Licensed under the Apache License, Version 2.0 (the "License");
     5     you may not use this file except in compliance with the License.
     6     You may obtain a copy of the License at
     7  
     8         http://www.apache.org/licenses/LICENSE-2.0
     9  
    10     Unless required by applicable law or agreed to in writing, software
    11     distributed under the License is distributed on an "AS IS" BASIS,
    12     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13     See the License for the specific language governing permissions and
    14     limitations under the License.
    15  */
    16  
    17  package local
    18  
    19  import (
    20  	"bufio"
    21  	"bytes"
    22  	"context"
    23  	_ "crypto/sha256" // required for digest package
    24  	"fmt"
    25  	"io"
    26  	"io/ioutil"
    27  	"math/rand"
    28  	"os"
    29  	"path/filepath"
    30  	"reflect"
    31  	"runtime"
    32  	"sync"
    33  	"testing"
    34  	"time"
    35  
    36  	"github.com/containerd/containerd/content"
    37  	"github.com/containerd/containerd/content/testsuite"
    38  	"github.com/containerd/containerd/errdefs"
    39  	"github.com/containerd/containerd/pkg/testutil"
    40  
    41  	"github.com/opencontainers/go-digest"
    42  	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    43  	"gotest.tools/assert"
    44  )
    45  
// memoryLabelStore is a trivial in-memory LabelStore implementation used by
// the tests in this file in place of a persistent label database.
type memoryLabelStore struct {
	l      sync.Mutex // guards labels
	labels map[digest.Digest]map[string]string // label sets keyed by content digest
}
    50  
    51  func newMemoryLabelStore() LabelStore {
    52  	return &memoryLabelStore{
    53  		labels: map[digest.Digest]map[string]string{},
    54  	}
    55  }
    56  
    57  func (mls *memoryLabelStore) Get(d digest.Digest) (map[string]string, error) {
    58  	mls.l.Lock()
    59  	labels := mls.labels[d]
    60  	mls.l.Unlock()
    61  
    62  	return labels, nil
    63  }
    64  
    65  func (mls *memoryLabelStore) Set(d digest.Digest, labels map[string]string) error {
    66  	mls.l.Lock()
    67  	mls.labels[d] = labels
    68  	mls.l.Unlock()
    69  
    70  	return nil
    71  }
    72  
    73  func (mls *memoryLabelStore) Update(d digest.Digest, update map[string]string) (map[string]string, error) {
    74  	mls.l.Lock()
    75  	labels, ok := mls.labels[d]
    76  	if !ok {
    77  		labels = map[string]string{}
    78  	}
    79  	for k, v := range update {
    80  		if v == "" {
    81  			delete(labels, k)
    82  		} else {
    83  			labels[k] = v
    84  		}
    85  	}
    86  	mls.labels[d] = labels
    87  	mls.l.Unlock()
    88  
    89  	return labels, nil
    90  }
    91  
    92  func TestContent(t *testing.T) {
    93  	testsuite.ContentSuite(t, "fs", func(ctx context.Context, root string) (context.Context, content.Store, func() error, error) {
    94  		cs, err := NewLabeledStore(root, newMemoryLabelStore())
    95  		if err != nil {
    96  			return nil, nil, nil, err
    97  		}
    98  		return ctx, cs, func() error {
    99  			return nil
   100  		}, nil
   101  	})
   102  }
   103  
// TestContentWriter exercises the ingest writer lifecycle: opening and
// closing a ref, resuming it, listing active ingests, committing data, and
// the already-exists behavior when the same content is committed twice.
func TestContentWriter(t *testing.T) {
	ctx, tmpdir, cs, cleanup := contentStoreEnv(t)
	defer cleanup()
	defer testutil.DumpDirOnFailure(t, tmpdir)

	// The store is expected to create its ingest directory eagerly.
	if _, err := os.Stat(filepath.Join(tmpdir, "ingest")); os.IsNotExist(err) {
		t.Fatal("ingest dir should be created", err)
	}

	cw, err := cs.Writer(ctx, content.WithRef("myref"))
	if err != nil {
		t.Fatal(err)
	}
	// Closing without committing must leave the ingest resumable.
	if err := cw.Close(); err != nil {
		t.Fatal(err)
	}

	// reopen, so we can test things
	cw, err = cs.Writer(ctx, content.WithRef("myref"))
	if err != nil {
		t.Fatal(err)
	}

	// make sure that second resume also fails
	// (the ref is locked by the writer obtained just above)
	if _, err = cs.Writer(ctx, content.WithRef("myref")); err == nil {
		// TODO(stevvooe): This also works across processes. Need to find a way
		// to test that, as well.
		t.Fatal("no error on second resume")
	}

	// we should also see this as an active ingestion
	ingestions, err := cs.ListStatuses(ctx, "")
	if err != nil {
		t.Fatal(err)
	}

	// clear out the time and meta cause we don't care for this test
	for i := range ingestions {
		ingestions[i].UpdatedAt = time.Time{}
		ingestions[i].StartedAt = time.Time{}
	}

	// Exactly one active ingest, for "myref", with nothing written yet.
	if !reflect.DeepEqual(ingestions, []content.Status{
		{
			Ref:    "myref",
			Offset: 0,
		},
	}) {
		t.Fatalf("unexpected ingestion set: %v", ingestions)
	}

	// Generate a 4MB random payload and commit it under its digest.
	p := make([]byte, 4<<20)
	if _, err := rand.Read(p); err != nil {
		t.Fatal(err)
	}
	expected := digest.FromBytes(p)

	checkCopy(t, int64(len(p)), cw, bufio.NewReader(ioutil.NopCloser(bytes.NewReader(p))))

	if err := cw.Commit(ctx, int64(len(p)), expected); err != nil {
		t.Fatal(err)
	}

	if err := cw.Close(); err != nil {
		t.Fatal(err)
	}

	cw, err = cs.Writer(ctx, content.WithRef("aref"))
	if err != nil {
		t.Fatal(err)
	}

	// now, attempt to write the same data again
	// (committing identical content under a new ref must report AlreadyExists)
	checkCopy(t, int64(len(p)), cw, bufio.NewReader(ioutil.NopCloser(bytes.NewReader(p))))
	if err := cw.Commit(ctx, int64(len(p)), expected); err == nil {
		t.Fatal("expected already exists error")
	} else if !errdefs.IsAlreadyExists(err) {
		t.Fatal(err)
	}

	path := checkBlobPath(t, cs, expected)

	// read the data back, make sure its the same
	pp, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(p, pp) {
		t.Fatal("mismatched data written to disk")
	}

}
   197  
   198  func TestWalkBlobs(t *testing.T) {
   199  	ctx, _, cs, cleanup := contentStoreEnv(t)
   200  	defer cleanup()
   201  
   202  	const (
   203  		nblobs  = 79
   204  		maxsize = 4 << 10
   205  	)
   206  	var (
   207  		blobs    = populateBlobStore(ctx, t, cs, nblobs, maxsize)
   208  		expected = map[digest.Digest]struct{}{}
   209  		found    = map[digest.Digest]struct{}{}
   210  	)
   211  
   212  	for dgst := range blobs {
   213  		expected[dgst] = struct{}{}
   214  	}
   215  
   216  	if err := cs.Walk(ctx, func(bi content.Info) error {
   217  		found[bi.Digest] = struct{}{}
   218  		checkBlobPath(t, cs, bi.Digest)
   219  		return nil
   220  	}); err != nil {
   221  		t.Fatal(err)
   222  	}
   223  
   224  	if !reflect.DeepEqual(expected, found) {
   225  		t.Fatalf("expected did not match found: %v != %v", found, expected)
   226  	}
   227  }
   228  
   229  // BenchmarkIngests checks the insertion time over varying blob sizes.
   230  //
   231  // Note that at the time of writing there is roughly a 4ms insertion overhead
   232  // for blobs. This seems to be due to the number of syscalls and file io we do
   233  // coordinating the ingestion.
   234  func BenchmarkIngests(b *testing.B) {
   235  	ctx, _, cs, cleanup := contentStoreEnv(b)
   236  	defer cleanup()
   237  
   238  	for _, size := range []int64{
   239  		1 << 10,
   240  		4 << 10,
   241  		512 << 10,
   242  		1 << 20,
   243  	} {
   244  		size := size
   245  		b.Run(fmt.Sprint(size), func(b *testing.B) {
   246  			b.StopTimer()
   247  			blobs := generateBlobs(b, int64(b.N), size)
   248  
   249  			var bytes int64
   250  			for _, blob := range blobs {
   251  				bytes += int64(len(blob))
   252  			}
   253  			b.SetBytes(bytes)
   254  
   255  			b.StartTimer()
   256  
   257  			for dgst, p := range blobs {
   258  				checkWrite(ctx, b, cs, dgst, p)
   259  			}
   260  		})
   261  	}
   262  }
   263  
// checker is the minimal failure-reporting interface shared by *testing.T
// and *testing.B, letting the helpers below serve both tests and benchmarks.
type checker interface {
	Fatal(args ...interface{})
}
   267  
   268  func generateBlobs(t checker, nblobs, maxsize int64) map[digest.Digest][]byte {
   269  	blobs := map[digest.Digest][]byte{}
   270  
   271  	for i := int64(0); i < nblobs; i++ {
   272  		p := make([]byte, rand.Int63n(maxsize))
   273  
   274  		if _, err := rand.Read(p); err != nil {
   275  			t.Fatal(err)
   276  		}
   277  
   278  		dgst := digest.FromBytes(p)
   279  		blobs[dgst] = p
   280  	}
   281  
   282  	return blobs
   283  }
   284  
   285  func populateBlobStore(ctx context.Context, t checker, cs content.Store, nblobs, maxsize int64) map[digest.Digest][]byte {
   286  	blobs := generateBlobs(t, nblobs, maxsize)
   287  
   288  	for dgst, p := range blobs {
   289  		checkWrite(ctx, t, cs, dgst, p)
   290  	}
   291  
   292  	return blobs
   293  }
   294  
   295  func contentStoreEnv(t checker) (context.Context, string, content.Store, func()) {
   296  	pc, _, _, ok := runtime.Caller(1)
   297  	if !ok {
   298  		t.Fatal("failed to resolve caller")
   299  	}
   300  	fn := runtime.FuncForPC(pc)
   301  
   302  	tmpdir, err := ioutil.TempDir("", filepath.Base(fn.Name())+"-")
   303  	if err != nil {
   304  		t.Fatal(err)
   305  	}
   306  
   307  	cs, err := NewStore(tmpdir)
   308  	if err != nil {
   309  		os.RemoveAll(tmpdir)
   310  		t.Fatal(err)
   311  	}
   312  
   313  	ctx, cancel := context.WithCancel(context.Background())
   314  	return ctx, tmpdir, cs, func() {
   315  		cancel()
   316  		os.RemoveAll(tmpdir)
   317  	}
   318  }
   319  
   320  func checkCopy(t checker, size int64, dst io.Writer, src io.Reader) {
   321  	nn, err := io.Copy(dst, src)
   322  	if err != nil {
   323  		t.Fatal(err)
   324  	}
   325  
   326  	if nn != size {
   327  		t.Fatal("incorrect number of bytes copied")
   328  	}
   329  }
   330  
   331  func checkBlobPath(t *testing.T, cs content.Store, dgst digest.Digest) string {
   332  	path := cs.(*store).blobPath(dgst)
   333  
   334  	if path != filepath.Join(cs.(*store).root, "blobs", dgst.Algorithm().String(), dgst.Hex()) {
   335  		t.Fatalf("unexpected path: %q", path)
   336  	}
   337  	fi, err := os.Stat(path)
   338  	if err != nil {
   339  		t.Fatalf("error stating blob path: %v", err)
   340  	}
   341  
   342  	if runtime.GOOS != "windows" {
   343  		// ensure that only read bits are set.
   344  		if ((fi.Mode() & os.ModePerm) & 0333) != 0 {
   345  			t.Fatalf("incorrect permissions: %v", fi.Mode())
   346  		}
   347  	}
   348  
   349  	return path
   350  }
   351  
   352  func checkWrite(ctx context.Context, t checker, cs content.Store, dgst digest.Digest, p []byte) digest.Digest {
   353  	if err := content.WriteBlob(ctx, cs, dgst.String(), bytes.NewReader(p),
   354  		ocispec.Descriptor{Size: int64(len(p)), Digest: dgst}); err != nil {
   355  		t.Fatal(err)
   356  	}
   357  
   358  	return dgst
   359  }
   360  
// TestWriterTruncateRecoversFromIncompleteWrite verifies that an ingest left
// in a partial state (wrong data already written) can be recovered by
// reopening the writer, truncating to zero, and writing the correct content.
func TestWriterTruncateRecoversFromIncompleteWrite(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "test-local-content-store-recover")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpdir)

	cs, err := NewStore(tmpdir)
	assert.NilError(t, err)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ref := "ref"
	contentB := []byte("this is the content")
	total := int64(len(contentB))
	// Leave a partial ingest with bogus data under the same ref.
	setupIncompleteWrite(ctx, t, cs, ref, total)

	// Reopening the same ref resumes the existing (corrupt) ingest.
	writer, err := cs.Writer(ctx, content.WithRef(ref), content.WithDescriptor(ocispec.Descriptor{Size: total}))
	assert.NilError(t, err)

	// Discard the bad data, then write and commit the real content.
	assert.NilError(t, writer.Truncate(0))

	_, err = writer.Write(contentB)
	assert.NilError(t, err)

	dgst := digest.FromBytes(contentB)
	err = writer.Commit(ctx, total, dgst)
	assert.NilError(t, err)
}
   389  
   390  func setupIncompleteWrite(ctx context.Context, t *testing.T, cs content.Store, ref string, total int64) {
   391  	writer, err := cs.Writer(ctx, content.WithRef(ref), content.WithDescriptor(ocispec.Descriptor{Size: total}))
   392  	assert.NilError(t, err)
   393  
   394  	_, err = writer.Write([]byte("bad data"))
   395  	assert.NilError(t, err)
   396  
   397  	assert.NilError(t, writer.Close())
   398  }
   399  
   400  func TestWriteReadEmptyFileTimestamp(t *testing.T) {
   401  	root, err := ioutil.TempDir("", "test-write-read-file-timestamp")
   402  	if err != nil {
   403  		t.Errorf("failed to create a tmp dir: %v", err)
   404  	}
   405  	defer os.RemoveAll(root)
   406  
   407  	emptyFile := filepath.Join(root, "updatedat")
   408  	if err := writeTimestampFile(emptyFile, time.Time{}); err != nil {
   409  		t.Errorf("failed to write Zero Time to file: %v", err)
   410  	}
   411  
   412  	timestamp, err := readFileTimestamp(emptyFile)
   413  	if err != nil {
   414  		t.Errorf("read empty timestamp file should success, but got error: %v", err)
   415  	}
   416  	if !timestamp.IsZero() {
   417  		t.Errorf("read empty timestamp file should return time.Time{}, but got: %v", timestamp)
   418  	}
   419  }