github.com/m3db/m3@v1.5.0/src/dbnode/persist/fs/files_test.go (about)

     1  // Copyright (c) 2016 Uber Technologies, Inc.
     2  //
     3  // Permission is hereby granted, free of charge, to any person obtaining a copy
     4  // of this software and associated documentation files (the "Software"), to deal
     5  // in the Software without restriction, including without limitation the rights
     6  // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     7  // copies of the Software, and to permit persons to whom the Software is
     8  // furnished to do so, subject to the following conditions:
     9  //
    10  // The above copyright notice and this permission notice shall be included in
    11  // all copies or substantial portions of the Software.
    12  //
    13  // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    14  // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    15  // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    16  // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    17  // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    18  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    19  // THE SOFTWARE.
    20  
    21  package fs
    22  
    23  import (
    24  	"errors"
    25  	"fmt"
    26  	"io/ioutil"
    27  	"os"
    28  	"path"
    29  	"path/filepath"
    30  	"sort"
    31  	"strconv"
    32  	"strings"
    33  	"testing"
    34  	"time"
    35  
    36  	"github.com/m3db/m3/src/dbnode/digest"
    37  	"github.com/m3db/m3/src/dbnode/namespace"
    38  	"github.com/m3db/m3/src/dbnode/persist"
    39  	"github.com/m3db/m3/src/dbnode/retention"
    40  	"github.com/m3db/m3/src/x/ident"
    41  	"github.com/m3db/m3/src/x/instrument"
    42  	xresource "github.com/m3db/m3/src/x/resource"
    43  	xtime "github.com/m3db/m3/src/x/time"
    44  
    45  	"github.com/pborman/uuid"
    46  	"github.com/stretchr/testify/assert"
    47  	"github.com/stretchr/testify/require"
    48  )
    49  
    50  var (
    51  	testNs1ID       = ident.StringID("testNs")
    52  	testNs2ID       = ident.StringID("testNs2")
    53  	testNs1Metadata = func(t *testing.T) namespace.Metadata {
    54  		md, err := namespace.NewMetadata(testNs1ID, namespace.NewOptions().
    55  			SetCacheBlocksOnRetrieve(true).
    56  			SetRetentionOptions(retention.NewOptions().SetBlockSize(testBlockSize)).
    57  			SetIndexOptions(namespace.NewIndexOptions().SetEnabled(true).SetBlockSize(testBlockSize)))
    58  		require.NoError(t, err)
    59  		return md
    60  	}
    61  	testNs2Metadata = func(t *testing.T) namespace.Metadata {
    62  		md, err := namespace.NewMetadata(testNs2ID, namespace.NewOptions().
    63  			SetCacheBlocksOnRetrieve(true).
    64  			SetRetentionOptions(retention.NewOptions().SetBlockSize(testBlockSize)).
    65  			SetIndexOptions(namespace.NewIndexOptions().SetEnabled(true).SetBlockSize(testBlockSize)))
    66  		require.NoError(t, err)
    67  		return md
    68  	}
    69  )
    70  
    71  func TestOpenFilesFails(t *testing.T) {
    72  	testFilePath := "/not/a/real/path"
    73  	expectedErr := errors.New("synthetic error")
    74  
    75  	opener := func(filePath string) (*os.File, error) {
    76  		assert.Equal(t, filePath, testFilePath)
    77  		return nil, expectedErr
    78  	}
    79  
    80  	var fd *os.File
    81  	err := openFiles(opener, map[string]**os.File{
    82  		testFilePath: &fd,
    83  	})
    84  	assert.Error(t, err)
    85  	assert.Equal(t, expectedErr, err)
    86  }
    87  
    88  func TestCloseAllFails(t *testing.T) {
    89  	file := createTempFile(t)
    90  	defer os.Remove(file.Name())
    91  
    92  	assert.NoError(t, file.Close())
    93  	assert.Error(t, xresource.CloseAll(file))
    94  }
    95  
    96  func TestDeleteFiles(t *testing.T) {
    97  	var files []string
    98  	iter := 3
    99  
   100  	for i := 0; i < iter; i++ {
   101  		fd := createTempFile(t)
   102  		fd.Close()
   103  		files = append(files, fd.Name())
   104  	}
   105  
   106  	// Add a non-existent file path
   107  	files = append(files, "/not/a/real/path")
   108  
   109  	require.Error(t, DeleteFiles(files))
   110  	for i := 0; i < iter; i++ {
   111  		require.True(t, !mustFileExists(t, files[i]))
   112  	}
   113  }
   114  
   115  func TestDeleteInactiveDirectories(t *testing.T) {
   116  	tempPrefix, err := ioutil.TempDir("", "filespath")
   117  	require.NoError(t, err)
   118  	defer func() {
   119  		os.RemoveAll(tempPrefix)
   120  	}()
   121  	namespaceDir := NamespaceDataDirPath(tempPrefix, testNs1ID)
   122  
   123  	// Test shard deletion within a namespace
   124  	shards := []uint32{uint32(4), uint32(5), uint32(6)}
   125  	shardDirs := []string{"4", "5", "6"}
   126  	for _, shard := range shards {
   127  		shardDir := ShardDataDirPath(tempPrefix, testNs1ID, shard)
   128  		err := os.MkdirAll(shardDir, defaultNewDirectoryMode)
   129  		require.NoError(t, err)
   130  
   131  		shardPath := path.Join(shardDir, "data.txt")
   132  		_, err = os.Create(shardPath)
   133  		require.NoError(t, err)
   134  	}
   135  
   136  	activeShards := shardDirs[1:]
   137  	err = DeleteInactiveDirectories(namespaceDir, activeShards)
   138  	require.NoError(t, err)
   139  	dirs, err := ioutil.ReadDir(namespaceDir)
   140  	require.NoError(t, err)
   141  	require.Equal(t, 2, len(dirs))
   142  	os.RemoveAll(namespaceDir)
   143  }
   144  
   145  func TestByTimeAscending(t *testing.T) {
   146  	files := []string{"foo/fileset-1-info.db", "foo/fileset-12-info.db", "foo/fileset-2-info.db"}
   147  	expected := []string{"foo/fileset-1-info.db", "foo/fileset-2-info.db", "foo/fileset-12-info.db"}
   148  	sort.Sort(byTimeAscending(files))
   149  	require.Equal(t, expected, files)
   150  }
   151  
// TestForEachInfoFile lays down five filesets in progressively more complete /
// correct states and asserts that forEachInfoFile only visits the single
// fileset whose checkpoint, digest-of-digests, and info digest all validate.
// NOTE: `buf` is a mutable digest.Buffer reused across stages — the order of
// WriteDigest calls below matters, as each stage's checkpoint file captures
// buf's contents at the moment createDataFile is called.
func TestForEachInfoFile(t *testing.T) {
	dir := createTempDir(t)
	defer os.RemoveAll(dir)

	shard := uint32(0)
	shardDir := ShardDataDirPath(dir, testNs1ID, shard)
	require.NoError(t, os.MkdirAll(shardDir, os.ModeDir|os.FileMode(0755)))

	blockStart := xtime.UnixNano(0)
	buf := digest.NewBuffer()

	// Stage 1: info file with no checkpoint file — must be skipped.
	createDataFile(t, shardDir, blockStart, InfoFileSuffix, nil)

	// Stage 2: info + checkpoint but no digest file — must be skipped.
	blockStart = blockStart.Add(time.Nanosecond)
	createDataFile(t, shardDir, blockStart, InfoFileSuffix, nil)
	createDataFile(t, shardDir, blockStart, CheckpointFileSuffix, buf)

	// Stage 3: checkpoint holds a digest computed over digests+0xd, which
	// does not match the digest file's actual contents — must be skipped.
	blockStart = blockStart.Add(time.Nanosecond)
	digests := []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc}
	buf.WriteDigest(digest.Checksum(append(digests, 0xd)))
	createDataFile(t, shardDir, blockStart, InfoFileSuffix, nil)
	createDataFile(t, shardDir, blockStart, DigestFileSuffix, digests)
	createDataFile(t, shardDir, blockStart, CheckpointFileSuffix, buf)

	// Stage 4: checkpoint now matches the digest file, but the info file's
	// contents ({0x1}) do not match the recorded info digest — skipped.
	blockStart = blockStart.Add(time.Nanosecond)

	buf.WriteDigest(digest.Checksum(digests))
	createDataFile(t, shardDir, blockStart, InfoFileSuffix, []byte{0x1})
	createDataFile(t, shardDir, blockStart, DigestFileSuffix, digests)
	createDataFile(t, shardDir, blockStart, CheckpointFileSuffix, buf)

	// Stage 5: every layer is consistent — info digest covers infoData, the
	// digest file starts with that digest (buf's current bytes), and the
	// checkpoint holds the digest of the digest file. This is the only
	// fileset the callback should see.
	blockStart = blockStart.Add(time.Nanosecond)
	infoData := []byte{0x1, 0x2, 0x3, 0x4}

	buf.WriteDigest(digest.Checksum(infoData))

	// digestOfDigest aliases buf's backing array here — intentional: the
	// digest file must begin with the info digest just written into buf.
	digestOfDigest := append(buf, []byte{0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc}...)
	buf.WriteDigest(digest.Checksum(digestOfDigest))
	createDataFile(t, shardDir, blockStart, InfoFileSuffix, infoData)
	createDataFile(t, shardDir, blockStart, DigestFileSuffix, digestOfDigest)
	createDataFile(t, shardDir, blockStart, CheckpointFileSuffix, buf)

	// Collect every info file path and payload the iterator reports.
	var fnames []string
	var res []byte
	forEachInfoFile(
		forEachInfoFileSelector{
			fileSetType:    persist.FileSetFlushType,
			contentType:    persist.FileSetDataContentType,
			filePathPrefix: dir,
			namespace:      testNs1ID,
			shard:          shard,
		},
		testReaderBufferSize,
		func(file FileSetFile, data []byte, _ bool) {
			fname, ok := file.InfoFilePath()
			require.True(t, ok)
			fnames = append(fnames, fname)
			res = append(res, data...)
		})

	// Only the stage-5 fileset (at the final blockStart) should be visited.
	require.Equal(t, []string{filesetPathFromTimeLegacy(shardDir, blockStart, InfoFileSuffix)}, fnames)
	require.Equal(t, infoData, res)
}
   220  
   221  func TestTimeFromFileName(t *testing.T) {
   222  	_, err := TimeFromFileName("foo/bar")
   223  	require.Error(t, err)
   224  	require.Equal(t, "unexpected filename: foo/bar", err.Error())
   225  
   226  	_, err = TimeFromFileName("foo/bar-baz")
   227  	require.Error(t, err)
   228  
   229  	v, err := TimeFromFileName("foo-1-bar.db")
   230  	expected := xtime.UnixNano(1)
   231  	require.Equal(t, expected, v)
   232  	require.NoError(t, err)
   233  
   234  	v, err = TimeFromFileName("foo-12345-6-bar.db")
   235  	expected = xtime.UnixNano(12345)
   236  	require.Equal(t, expected, v)
   237  	require.NoError(t, err)
   238  
   239  	v, err = TimeFromFileName("foo/bar/foo-21234567890-bar.db")
   240  	expected = xtime.UnixNano(21234567890)
   241  	require.Equal(t, expected, v)
   242  	require.NoError(t, err)
   243  }
   244  
   245  func TestTimeAndIndexFromCommitlogFileName(t *testing.T) {
   246  	_, _, err := TimeAndIndexFromCommitlogFilename("foo/bar")
   247  	require.Error(t, err)
   248  	require.Equal(t, "unexpected filename: foo/bar", err.Error())
   249  
   250  	_, _, err = TimeAndIndexFromCommitlogFilename("foo/bar-baz")
   251  	require.Error(t, err)
   252  
   253  	type expected struct {
   254  		t xtime.UnixNano
   255  		i int
   256  	}
   257  	ts, i, err := TimeAndIndexFromCommitlogFilename("foo-1-0.db")
   258  	exp := expected{xtime.UnixNano(1), 0}
   259  	require.Equal(t, exp.t, ts)
   260  	require.Equal(t, exp.i, i)
   261  	require.NoError(t, err)
   262  
   263  	ts, i, err = TimeAndIndexFromCommitlogFilename("foo/bar/foo-21234567890-1.db")
   264  	exp = expected{xtime.UnixNano(21234567890), 1}
   265  	require.Equal(t, exp.t, ts)
   266  	require.Equal(t, exp.i, i)
   267  	require.NoError(t, err)
   268  }
   269  
   270  func TestTimeAndVolumeIndexFromFileSetFilename(t *testing.T) {
   271  	_, _, err := TimeAndVolumeIndexFromFileSetFilename("foo/bar")
   272  	require.Error(t, err)
   273  	require.Equal(t, "unexpected filename: foo/bar", err.Error())
   274  
   275  	_, _, err = TimeAndVolumeIndexFromFileSetFilename("foo/bar-baz")
   276  	require.Error(t, err)
   277  
   278  	type expected struct {
   279  		t xtime.UnixNano
   280  		i int
   281  	}
   282  	ts, i, err := TimeAndVolumeIndexFromFileSetFilename("foo-1-0-data.db")
   283  	exp := expected{xtime.UnixNano(1), 0}
   284  	require.Equal(t, exp.t, ts)
   285  	require.Equal(t, exp.i, i)
   286  	require.NoError(t, err)
   287  
   288  	validName := "foo/bar/fileset-21234567890-1-data.db"
   289  	ts, i, err = TimeAndVolumeIndexFromFileSetFilename(validName)
   290  	exp = expected{xtime.UnixNano(21234567890), 1}
   291  	require.Equal(t, exp.t, ts)
   292  	require.Equal(t, exp.i, i)
   293  	require.NoError(t, err)
   294  	require.Equal(t, FilesetPathFromTimeAndIndex("foo/bar", exp.t, exp.i, "data"), validName)
   295  }
   296  
   297  func TestTimeAndVolumeIndexFromDataFileSetFilename(t *testing.T) {
   298  	_, _, err := TimeAndVolumeIndexFromDataFileSetFilename("foo/bar")
   299  	require.Error(t, err)
   300  	require.Equal(t, "unexpected filename: foo/bar", err.Error())
   301  
   302  	_, _, err = TimeAndVolumeIndexFromDataFileSetFilename("foo/bar-baz")
   303  	require.Error(t, err)
   304  
   305  	type expected struct {
   306  		t xtime.UnixNano
   307  		i int
   308  	}
   309  	ts, i, err := TimeAndVolumeIndexFromDataFileSetFilename("foo-1-0-data.db")
   310  	exp := expected{xtime.UnixNano(1), 0}
   311  	require.Equal(t, exp.t, ts)
   312  	require.Equal(t, exp.i, i)
   313  	require.NoError(t, err)
   314  
   315  	validName := "foo/bar/fileset-21234567890-1-data.db"
   316  	ts, i, err = TimeAndVolumeIndexFromDataFileSetFilename(validName)
   317  	exp = expected{xtime.UnixNano(21234567890), 1}
   318  	require.Equal(t, exp.t, ts)
   319  	require.Equal(t, exp.i, i)
   320  	require.NoError(t, err)
   321  	require.Equal(t, dataFilesetPathFromTimeAndIndex("foo/bar", exp.t, exp.i, "data", false), validName)
   322  
   323  	unindexedName := "foo/bar/fileset-21234567890-data.db"
   324  	ts, i, err = TimeAndVolumeIndexFromDataFileSetFilename(unindexedName)
   325  	exp = expected{xtime.UnixNano(21234567890), 0}
   326  	require.Equal(t, exp.t, ts)
   327  	require.Equal(t, exp.i, i)
   328  	require.NoError(t, err)
   329  	require.Equal(t, dataFilesetPathFromTimeAndIndex("foo/bar", exp.t, exp.i, "data", true), unindexedName)
   330  }
   331  
   332  func TestSnapshotMetadataFilePathFromIdentifierRoundTrip(t *testing.T) {
   333  	idUUID := uuid.Parse("bf58eb3e-0582-42ee-83b2-d098c206260e")
   334  	require.NotNil(t, idUUID)
   335  
   336  	var (
   337  		prefix = "/var/lib/m3db"
   338  		id     = SnapshotMetadataIdentifier{
   339  			Index: 10,
   340  			UUID:  idUUID,
   341  		}
   342  	)
   343  
   344  	var (
   345  		expected = "/var/lib/m3db/snapshots/snapshot-bf58eb3e058242ee83b2d098c206260e-10-metadata.db"
   346  		actual   = snapshotMetadataFilePathFromIdentifier(prefix, id)
   347  	)
   348  	require.Equal(t, expected, actual)
   349  
   350  	idFromPath, err := snapshotMetadataIdentifierFromFilePath(expected)
   351  	require.NoError(t, err)
   352  	require.Equal(t, id, idFromPath)
   353  }
   354  
   355  func TestSnapshotMetadataCheckpointFilePathFromIdentifierRoundTrip(t *testing.T) {
   356  	idUUID := uuid.Parse("bf58eb3e-0582-42ee-83b2-d098c206260e")
   357  	require.NotNil(t, idUUID)
   358  
   359  	var (
   360  		prefix = "/var/lib/m3db"
   361  		id     = SnapshotMetadataIdentifier{
   362  			Index: 10,
   363  			UUID:  idUUID,
   364  		}
   365  	)
   366  
   367  	var (
   368  		expected = "/var/lib/m3db/snapshots/snapshot-bf58eb3e058242ee83b2d098c206260e-10-metadata-checkpoint.db"
   369  		actual   = snapshotMetadataCheckpointFilePathFromIdentifier(prefix, id)
   370  	)
   371  	require.Equal(t, expected, actual)
   372  
   373  	idFromPath, err := snapshotMetadataIdentifierFromFilePath(expected)
   374  	require.NoError(t, err)
   375  	require.Equal(t, id, idFromPath)
   376  }
   377  
   378  func TestSanitizedUUIDsCanBeParsed(t *testing.T) {
   379  	u := uuid.Parse("bf58eb3e-0582-42ee-83b2-d098c206260e")
   380  	require.NotNil(t, u)
   381  
   382  	parsedUUID, ok := parseUUID(sanitizeUUID(u))
   383  	require.True(t, ok)
   384  	require.Equal(t, u.String(), parsedUUID.String())
   385  }
   386  
   387  func TestFileExists(t *testing.T) {
   388  	var (
   389  		dir               = createTempDir(t)
   390  		shard             = uint32(10)
   391  		start             = xtime.Now()
   392  		shardDir          = ShardDataDirPath(dir, testNs1ID, shard)
   393  		checkpointFileBuf = make([]byte, CheckpointFileSizeBytes)
   394  		err               = os.MkdirAll(shardDir, defaultNewDirectoryMode)
   395  	)
   396  	defer os.RemoveAll(dir)
   397  	require.NoError(t, err)
   398  
   399  	infoFilePath := filesetPathFromTimeLegacy(shardDir, start, InfoFileSuffix)
   400  	createDataFile(t, shardDir, start, InfoFileSuffix, checkpointFileBuf)
   401  	require.True(t, mustFileExists(t, infoFilePath))
   402  	exists, err := DataFileSetExists(dir, testNs1ID, uint32(shard), start, 0)
   403  	require.NoError(t, err)
   404  	require.False(t, exists)
   405  
   406  	checkpointFilePath := filesetPathFromTimeLegacy(shardDir, start, CheckpointFileSuffix)
   407  	createDataFile(t, shardDir, start, CheckpointFileSuffix, checkpointFileBuf)
   408  	exists, err = DataFileSetExists(dir, testNs1ID, uint32(shard), start, 0)
   409  	require.NoError(t, err)
   410  	require.True(t, exists)
   411  	exists, err = DataFileSetExists(dir, testNs1ID, uint32(shard), start, 1)
   412  	require.NoError(t, err)
   413  	require.False(t, exists)
   414  
   415  	exists, err = CompleteCheckpointFileExists(checkpointFilePath)
   416  	require.NoError(t, err)
   417  	require.True(t, exists)
   418  
   419  	defer instrument.SetShouldPanicEnvironmentVariable(true)()
   420  	require.Panics(t, func() {
   421  		_, _ = FileExists(checkpointFilePath)
   422  	})
   423  
   424  	os.Remove(infoFilePath)
   425  	require.False(t, mustFileExists(t, infoFilePath))
   426  }
   427  
   428  func TestCompleteCheckpointFileExists(t *testing.T) {
   429  	var (
   430  		dir                = createTempDir(t)
   431  		shard              = uint32(10)
   432  		start              = xtime.Now()
   433  		shardDir           = ShardDataDirPath(dir, testNs1ID, shard)
   434  		checkpointFilePath = filesetPathFromTimeLegacy(shardDir, start, CheckpointFileSuffix)
   435  		err                = os.MkdirAll(shardDir, defaultNewDirectoryMode)
   436  
   437  		validCheckpointFileBuf   = make([]byte, CheckpointFileSizeBytes)
   438  		invalidCheckpointFileBuf = make([]byte, CheckpointFileSizeBytes+1)
   439  	)
   440  	defer os.RemoveAll(dir)
   441  	require.NoError(t, err)
   442  
   443  	createDataFile(t, shardDir, start, CheckpointFileSuffix, invalidCheckpointFileBuf)
   444  	exists, err := CompleteCheckpointFileExists(checkpointFilePath)
   445  	require.NoError(t, err)
   446  	require.False(t, exists)
   447  
   448  	createDataFile(t, shardDir, start, CheckpointFileSuffix, validCheckpointFileBuf)
   449  	exists, err = CompleteCheckpointFileExists(checkpointFilePath)
   450  	require.NoError(t, err)
   451  	require.True(t, exists)
   452  
   453  	defer instrument.SetShouldPanicEnvironmentVariable(true)()
   454  	require.Panics(t, func() { _, _ = CompleteCheckpointFileExists("some-arbitrary-file") })
   455  }
   456  
   457  func TestShardDirPath(t *testing.T) {
   458  	require.Equal(t, "foo/bar/data/testNs/12", ShardDataDirPath("foo/bar", testNs1ID, 12))
   459  	require.Equal(t, "foo/bar/data/testNs/12", ShardDataDirPath("foo/bar/", testNs1ID, 12))
   460  }
   461  
   462  func TestFilePathFromTime(t *testing.T) {
   463  	start := xtime.FromSecondsAndNanos(1465501321, 123456789)
   464  	inputs := []struct {
   465  		prefix   string
   466  		suffix   string
   467  		expected string
   468  	}{
   469  		{"foo/bar", InfoFileSuffix, "foo/bar/fileset-1465501321123456789-info.db"},
   470  		{"foo/bar", indexFileSuffix, "foo/bar/fileset-1465501321123456789-index.db"},
   471  		{"foo/bar", dataFileSuffix, "foo/bar/fileset-1465501321123456789-data.db"},
   472  		{"foo/bar", CheckpointFileSuffix, "foo/bar/fileset-1465501321123456789-checkpoint.db"},
   473  		{"foo/bar/", InfoFileSuffix, "foo/bar/fileset-1465501321123456789-info.db"},
   474  	}
   475  	for _, input := range inputs {
   476  		require.Equal(t, input.expected, filesetPathFromTimeLegacy(input.prefix, start, input.suffix))
   477  	}
   478  }
   479  
   480  func TestFileSetFilesBefore(t *testing.T) {
   481  	shard := uint32(0)
   482  	dir := createDataFlushInfoFilesDir(t, testNs1ID, shard, 20)
   483  	defer os.RemoveAll(dir)
   484  
   485  	cutoffIter := 8
   486  	cutoff := xtime.UnixNano(1 + cutoffIter)
   487  	res, err := DataFileSetsBefore(dir, testNs1ID, shard, cutoff)
   488  	require.NoError(t, err)
   489  	require.Equal(t, cutoffIter, len(res))
   490  
   491  	shardDir := path.Join(dir, dataDirName, testNs1ID.String(), strconv.Itoa(int(shard)))
   492  	for i := 0; i < len(res); i++ {
   493  		ts := xtime.UnixNano(int64(i + 1))
   494  		require.Equal(t, filesetPathFromTimeLegacy(shardDir, ts, InfoFileSuffix), res[i])
   495  	}
   496  }
   497  
   498  func TestFileSetAt(t *testing.T) {
   499  	shard := uint32(0)
   500  	numIters := 20
   501  	dir := createDataCheckpointFilesDir(t, testNs1ID, shard, numIters)
   502  	defer os.RemoveAll(dir)
   503  
   504  	for i := 0; i < numIters; i++ {
   505  		timestamp := xtime.UnixNano(int64(i))
   506  		res, ok, err := FileSetAt(dir, testNs1ID, shard, timestamp, 0)
   507  		require.NoError(t, err)
   508  		require.True(t, ok)
   509  		require.Equal(t, timestamp, res.ID.BlockStart)
   510  	}
   511  }
   512  
   513  func TestFileSetAtNonLegacy(t *testing.T) {
   514  	shard := uint32(0)
   515  	numIters := 20
   516  	dir := createDataFiles(t, dataDirName, testNs1ID, shard, numIters, true, CheckpointFileSuffix)
   517  	defer os.RemoveAll(dir)
   518  
   519  	for i := 0; i < numIters; i++ {
   520  		timestamp := xtime.UnixNano(int64(i))
   521  		res, ok, err := FileSetAt(dir, testNs1ID, shard, timestamp, 0)
   522  		require.NoError(t, err)
   523  		require.True(t, ok)
   524  		require.Equal(t, timestamp, res.ID.BlockStart)
   525  	}
   526  }
   527  
   528  func TestFileSetAtNotFirstVolumeIndex(t *testing.T) {
   529  	shard := uint32(0)
   530  	numIters := 20
   531  	volumeIndex := 1
   532  	dir := createDataFilesWithVolumeIndex(t, dataDirName, testNs1ID, shard, numIters, true,
   533  		CheckpointFileSuffix, volumeIndex)
   534  	defer os.RemoveAll(dir)
   535  
   536  	for i := 0; i < numIters; i++ {
   537  		timestamp := xtime.UnixNano(int64(i))
   538  		res, ok, err := FileSetAt(dir, testNs1ID, shard, timestamp, volumeIndex)
   539  		require.NoError(t, err)
   540  		require.True(t, ok)
   541  		require.Equal(t, timestamp, res.ID.BlockStart)
   542  	}
   543  }
   544  
   545  func TestFileSetAtIgnoresWithoutCheckpoint(t *testing.T) {
   546  	shard := uint32(0)
   547  	numIters := 20
   548  	dir := createDataFlushInfoFilesDir(t, testNs1ID, shard, numIters)
   549  	defer os.RemoveAll(dir)
   550  
   551  	for i := 0; i < numIters; i++ {
   552  		timestamp := xtime.UnixNano(int64(i))
   553  		_, ok, err := FileSetAt(dir, testNs1ID, shard, timestamp, 0)
   554  		require.NoError(t, err)
   555  		require.False(t, ok)
   556  	}
   557  }
   558  
   559  func TestDeleteFileSetAt(t *testing.T) {
   560  	shard := uint32(0)
   561  	numIters := 20
   562  	dir := createDataCheckpointFilesDir(t, testNs1ID, shard, numIters)
   563  	defer os.RemoveAll(dir)
   564  
   565  	for i := 0; i < numIters; i++ {
   566  		timestamp := xtime.UnixNano(int64(i))
   567  		res, ok, err := FileSetAt(dir, testNs1ID, shard, timestamp, 0)
   568  		require.NoError(t, err)
   569  		require.True(t, ok)
   570  		require.Equal(t, timestamp, res.ID.BlockStart)
   571  
   572  		err = DeleteFileSetAt(dir, testNs1ID, shard, timestamp, 0)
   573  		require.NoError(t, err)
   574  
   575  		res, ok, err = FileSetAt(dir, testNs1ID, shard, timestamp, 0)
   576  		require.NoError(t, err)
   577  		require.False(t, ok)
   578  	}
   579  }
   580  
   581  func TestFileSetAtNotExist(t *testing.T) {
   582  	shard := uint32(0)
   583  	dir := createDataFlushInfoFilesDir(t, testNs1ID, shard, 0)
   584  	defer os.RemoveAll(dir)
   585  
   586  	timestamp := xtime.UnixNano(0)
   587  	_, ok, err := FileSetAt(dir, testNs1ID, shard, timestamp, 0)
   588  	require.NoError(t, err)
   589  	require.False(t, ok)
   590  }
   591  
   592  func TestFileSetFilesNoFiles(t *testing.T) {
   593  	// Make empty directory
   594  	shard := uint32(0)
   595  	dir := createTempDir(t)
   596  	shardDir := path.Join(dir, "data", testNs1ID.String(), strconv.Itoa(int(shard)))
   597  	require.NoError(t, os.MkdirAll(shardDir, 0755))
   598  	defer os.RemoveAll(shardDir)
   599  
   600  	res, err := filesetFiles(filesetFilesSelector{
   601  		filePathPrefix: dir,
   602  		namespace:      testNs1ID,
   603  		shard:          shard,
   604  		pattern:        filesetFilePattern,
   605  	})
   606  	require.NoError(t, err)
   607  	require.Equal(t, 0, len(res))
   608  }
   609  
   610  func TestSnapshotFiles(t *testing.T) {
   611  	var (
   612  		shard          = uint32(0)
   613  		dir            = createTempDir(t)
   614  		filePathPrefix = filepath.Join(dir, "")
   615  	)
   616  	defer os.RemoveAll(dir)
   617  
   618  	// Write out snapshot file
   619  	writeOutTestSnapshot(t, filePathPrefix, shard, testWriterStart, 0)
   620  
   621  	// Load snapshot files
   622  	snapshotFiles, err := SnapshotFiles(filePathPrefix, testNs1ID, shard)
   623  	require.NoError(t, err)
   624  	require.Equal(t, 1, len(snapshotFiles))
   625  	snapshotTime, snapshotID, err := snapshotFiles[0].SnapshotTimeAndID()
   626  	require.NoError(t, err)
   627  	require.True(t, testWriterStart.Equal(snapshotTime))
   628  	require.Equal(t, testSnapshotID, snapshotID)
   629  	require.False(t, testWriterStart.IsZero())
   630  }
   631  
   632  func TestSnapshotFilesNoFiles(t *testing.T) {
   633  	// Make empty directory
   634  	shard := uint32(0)
   635  	dir := createTempDir(t)
   636  	shardDir := path.Join(dir, "snapshots", testNs1ID.String(), strconv.Itoa(int(shard)))
   637  	require.NoError(t, os.MkdirAll(shardDir, 0755))
   638  	defer os.RemoveAll(shardDir)
   639  
   640  	files, err := SnapshotFiles(dir, testNs1ID, shard)
   641  	require.NoError(t, err)
   642  	require.Equal(t, 0, len(files))
   643  	for i, snapshotFile := range files {
   644  		require.Equal(t, int64(i), int64(snapshotFile.ID.BlockStart))
   645  	}
   646  
   647  	require.Equal(t, 0, len(files.Filepaths()))
   648  }
   649  
   650  func TestNextSnapshotFileSetVolumeIndex(t *testing.T) {
   651  	var (
   652  		shard      = uint32(0)
   653  		dir        = createTempDir(t)
   654  		shardDir   = ShardSnapshotsDirPath(dir, testNs1ID, shard)
   655  		blockStart = xtime.Now().Truncate(time.Hour)
   656  	)
   657  	require.NoError(t, os.MkdirAll(shardDir, 0755))
   658  	defer os.RemoveAll(shardDir)
   659  
   660  	index, err := NextSnapshotFileSetVolumeIndex(
   661  		dir, testNs1ID, shard, blockStart)
   662  	require.NoError(t, err)
   663  	require.Equal(t, 0, index)
   664  
   665  	// Check increments properly
   666  	curr := -1
   667  	for i := 0; i <= 10; i++ {
   668  		index, err := NextSnapshotFileSetVolumeIndex(dir, testNs1ID,
   669  			shard, blockStart)
   670  		require.NoError(t, err)
   671  		require.Equal(t, curr+1, index)
   672  		curr = index
   673  
   674  		writeOutTestSnapshot(t, dir, shard, blockStart, index)
   675  	}
   676  }
   677  
   678  // TestSortedSnapshotMetadataFiles tests the SortedSnapshotMetadataFiles function by writing out
   679  // a number of valid snapshot metadata files (along with their checkpoint files), as
   680  // well as one invalid / corrupt one, and then asserts that the correct number of valid
   681  // and corrupt files are returned.
   682  func TestSortedSnapshotMetadataFiles(t *testing.T) {
   683  	var (
   684  		dir            = createTempDir(t)
   685  		filePathPrefix = filepath.Join(dir, "")
   686  		opts           = testDefaultOpts.
   687  				SetFilePathPrefix(filePathPrefix)
   688  		commitlogIdentifier = persist.CommitLogFile{
   689  			FilePath: "some_path",
   690  			Index:    0,
   691  		}
   692  		numMetadataFiles = 10
   693  	)
   694  	defer func() {
   695  		os.RemoveAll(dir)
   696  	}()
   697  
   698  	// Shoulld be no files before we write them out.
   699  	metadataFiles, errorsWithpaths, err := SortedSnapshotMetadataFiles(opts)
   700  	require.NoError(t, err)
   701  	require.Empty(t, errorsWithpaths)
   702  	require.Empty(t, metadataFiles)
   703  
   704  	// Write out a bunch of metadata files along with their corresponding checkpoints.
   705  	for i := 0; i < numMetadataFiles; i++ {
   706  		snapshotUUID := uuid.Parse("6645a373-bf82-42e7-84a6-f8452b137549")
   707  		require.NotNil(t, snapshotUUID)
   708  
   709  		snapshotMetadataIdentifier := SnapshotMetadataIdentifier{
   710  			Index: int64(i),
   711  			UUID:  snapshotUUID,
   712  		}
   713  
   714  		writer := NewSnapshotMetadataWriter(opts)
   715  		err := writer.Write(SnapshotMetadataWriteArgs{
   716  			ID:                  snapshotMetadataIdentifier,
   717  			CommitlogIdentifier: commitlogIdentifier,
   718  		})
   719  		require.NoError(t, err)
   720  
   721  		reader := NewSnapshotMetadataReader(opts)
   722  		snapshotMetadata, err := reader.Read(snapshotMetadataIdentifier)
   723  		require.NoError(t, err)
   724  
   725  		require.Equal(t, SnapshotMetadata{
   726  			ID:                  snapshotMetadataIdentifier,
   727  			CommitlogIdentifier: commitlogIdentifier,
   728  			MetadataFilePath: snapshotMetadataFilePathFromIdentifier(
   729  				filePathPrefix, snapshotMetadataIdentifier),
   730  			CheckpointFilePath: snapshotMetadataCheckpointFilePathFromIdentifier(
   731  				filePathPrefix, snapshotMetadataIdentifier),
   732  		}, snapshotMetadata)
   733  
   734  		// Corrupt the last file.
   735  		if i == numMetadataFiles-1 {
   736  			os.Remove(snapshotMetadataCheckpointFilePathFromIdentifier(
   737  				filePathPrefix, snapshotMetadataIdentifier))
   738  		}
   739  	}
   740  
   741  	metadataFiles, errorsWithpaths, err = SortedSnapshotMetadataFiles(opts)
   742  	require.NoError(t, err)
   743  	require.Len(t, errorsWithpaths, 1)
   744  	require.Len(t, metadataFiles, numMetadataFiles-1)
   745  
   746  	// Assert that they're sorted.
   747  	for i, file := range metadataFiles {
   748  		require.Equal(t, int64(i), file.ID.Index)
   749  	}
   750  }
   751  
   752  // TestNextSnapshotMetadataFileIndex tests the NextSnapshotMetadataFileIndex function by
   753  // writing out a number of SnapshotMetadata files and then ensuring that the NextSnapshotMetadataFileIndex
   754  // function returns the correct next index.
   755  func TestNextSnapshotMetadataFileIndex(t *testing.T) {
   756  	var (
   757  		dir            = createTempDir(t)
   758  		filePathPrefix = filepath.Join(dir, "")
   759  		opts           = testDefaultOpts.
   760  				SetFilePathPrefix(filePathPrefix)
   761  		commitlogIdentifier = persist.CommitLogFile{
   762  			FilePath: "some_path",
   763  			Index:    0,
   764  		}
   765  		numMetadataFiles = 10
   766  	)
   767  	defer func() {
   768  		os.RemoveAll(dir)
   769  	}()
   770  
   771  	// Shoulld be no files before we write them out.
   772  	metadataFiles, errorsWithpaths, err := SortedSnapshotMetadataFiles(opts)
   773  	require.NoError(t, err)
   774  	require.Empty(t, errorsWithpaths)
   775  	require.Empty(t, metadataFiles)
   776  
   777  	writer := NewSnapshotMetadataWriter(opts)
   778  	// Write out a bunch of metadata files along with their corresponding checkpoints.
   779  	for i := 0; i < numMetadataFiles; i++ {
   780  		snapshotUUID := uuid.Parse("6645a373-bf82-42e7-84a6-f8452b137549")
   781  		require.NotNil(t, snapshotUUID)
   782  
   783  		snapshotMetadataIdentifier := SnapshotMetadataIdentifier{
   784  			Index: int64(i),
   785  			UUID:  snapshotUUID,
   786  		}
   787  
   788  		err := writer.Write(SnapshotMetadataWriteArgs{
   789  			ID:                  snapshotMetadataIdentifier,
   790  			CommitlogIdentifier: commitlogIdentifier,
   791  		})
   792  		require.NoError(t, err)
   793  	}
   794  
   795  	nextIdx, err := NextSnapshotMetadataFileIndex(opts)
   796  	require.NoError(t, err)
   797  	// Snapshot metadata file indices are zero-based so if we wrote out
   798  	// numMetadataFiles, then the last index should be numMetadataFiles-1
   799  	// and the next one should be numMetadataFiles.
   800  	require.Equal(t, int64(numMetadataFiles), nextIdx)
   801  }
   802  
   803  func TestNextIndexFileSetVolumeIndex(t *testing.T) {
   804  	// Make empty directory
   805  	dir := createTempDir(t)
   806  	dataDir := NamespaceIndexDataDirPath(dir, testNs1ID)
   807  	require.NoError(t, os.MkdirAll(dataDir, 0755))
   808  	defer os.RemoveAll(dataDir)
   809  
   810  	blockStart := xtime.Now().Truncate(time.Hour)
   811  
   812  	// Check increments properly
   813  	curr := -1
   814  	for i := 0; i <= 10; i++ {
   815  		index, err := NextIndexFileSetVolumeIndex(dir, testNs1ID, blockStart)
   816  		require.NoError(t, err)
   817  		require.Equal(t, curr+1, index)
   818  		curr = index
   819  
   820  		p := FilesetPathFromTimeAndIndex(dataDir, blockStart, index, CheckpointFileSuffix)
   821  
   822  		digestBuf := digest.NewBuffer()
   823  		digestBuf.WriteDigest(digest.Checksum([]byte("bar")))
   824  
   825  		err = ioutil.WriteFile(p, digestBuf, defaultNewFileMode)
   826  		require.NoError(t, err)
   827  	}
   828  }
   829  
   830  func TestMultipleForBlockStart(t *testing.T) {
   831  	numSnapshots := 20
   832  	numSnapshotsPerBlock := 4
   833  	shard := uint32(0)
   834  	dir := createTempDir(t)
   835  	defer os.RemoveAll(dir)
   836  	shardDir := path.Join(dir, snapshotDirName, testNs1ID.String(), strconv.Itoa(int(shard)))
   837  	require.NoError(t, os.MkdirAll(shardDir, 0755))
   838  
   839  	// Write out many files with the same blockStart, but different indices
   840  	ts := xtime.UnixNano(1)
   841  	for i := 0; i < numSnapshots; i++ {
   842  		volume := i % numSnapshotsPerBlock
   843  		// Periodically update the blockStart
   844  		if volume == 0 {
   845  			ts = xtime.UnixNano(int64(i + 1))
   846  		}
   847  
   848  		writeOutTestSnapshot(t, dir, shard, ts, volume)
   849  	}
   850  
   851  	files, err := SnapshotFiles(dir, testNs1ID, shard)
   852  	require.NoError(t, err)
   853  	require.Equal(t, 20, len(files))
   854  	require.Equal(t, 20*7, len(files.Filepaths()))
   855  
   856  	// Make sure LatestForBlock works even if the input list is not sorted properly
   857  	for i := range files {
   858  		if i+1 < len(files) {
   859  			files[i], files[i+1] = files[i+1], files[i]
   860  		}
   861  	}
   862  
   863  	latestSnapshot, ok := files.LatestVolumeForBlock(ts)
   864  	require.True(t, ok)
   865  	require.Equal(t, numSnapshotsPerBlock-1, latestSnapshot.ID.VolumeIndex)
   866  }
   867  
   868  func TestSnapshotFileHasCompleteCheckpointFile(t *testing.T) {
   869  	dir := createTempDir(t)
   870  	defer os.RemoveAll(dir)
   871  
   872  	checkpointFilePath := path.Join(dir, "123-checkpoint-0.db")
   873  
   874  	// Test a valid complete checkpoint file
   875  	digestBuffer := digest.NewBuffer()
   876  	digestBuffer.WriteDigest(digest.Checksum([]byte{1, 2, 3}))
   877  	err := ioutil.WriteFile(checkpointFilePath, digestBuffer, defaultNewFileMode)
   878  	require.NoError(t, err)
   879  
   880  	// Check validates a valid checkpoint file
   881  	f := FileSetFile{
   882  		AbsoluteFilePaths: []string{checkpointFilePath},
   883  	}
   884  	require.Equal(t, true, f.HasCompleteCheckpointFile())
   885  
   886  	// Check fails when checkpoint exists but not valid
   887  	err = ioutil.WriteFile(checkpointFilePath, []byte{42}, defaultNewFileMode)
   888  	require.NoError(t, err)
   889  	f = FileSetFile{
   890  		AbsoluteFilePaths: []string{checkpointFilePath},
   891  	}
   892  	require.Equal(t, false, f.HasCompleteCheckpointFile())
   893  
   894  	// Check ignores index file path
   895  	indexFilePath := path.Join(dir, "123-index-0.db")
   896  	f = FileSetFile{
   897  		AbsoluteFilePaths: []string{indexFilePath},
   898  	}
   899  	require.Equal(t, false, f.HasCompleteCheckpointFile())
   900  }
   901  
   902  func TestSnapshotDirPath(t *testing.T) {
   903  	require.Equal(t, "prefix/snapshots", SnapshotDirPath("prefix"))
   904  }
   905  
   906  func TestNamespaceSnapshotsDirPath(t *testing.T) {
   907  	expected := "prefix/snapshots/testNs"
   908  	actual := NamespaceSnapshotsDirPath("prefix", testNs1ID)
   909  	require.Equal(t, expected, actual)
   910  }
   911  
   912  func TestShardSnapshotsDirPath(t *testing.T) {
   913  	expected := "prefix/snapshots/testNs/0"
   914  	actual := ShardSnapshotsDirPath("prefix", testNs1ID, 0)
   915  	require.Equal(t, expected, actual)
   916  }
   917  
   918  func TestSnapshotFileSetExistsAt(t *testing.T) {
   919  	var (
   920  		shard     = uint32(0)
   921  		ts        = xtime.UnixNano(1)
   922  		dir       = createTempDir(t)
   923  		shardPath = ShardSnapshotsDirPath(dir, testNs1ID, 0)
   924  	)
   925  	require.NoError(t, os.MkdirAll(shardPath, 0755))
   926  
   927  	writeOutTestSnapshot(t, dir, shard, ts, 0)
   928  
   929  	exists, err := SnapshotFileSetExistsAt(dir, testNs1ID, testSnapshotID, shard, ts)
   930  	require.NoError(t, err)
   931  	require.True(t, exists)
   932  }
   933  
   934  func TestSortedCommitLogFiles(t *testing.T) {
   935  	iter := 20
   936  	dir := createCommitLogFiles(t, iter)
   937  	defer os.RemoveAll(dir)
   938  
   939  	files, err := SortedCommitLogFiles(CommitLogsDirPath(dir))
   940  	require.NoError(t, err)
   941  	require.Equal(t, iter, len(files))
   942  
   943  	for i := 0; i < iter; i++ {
   944  		require.Equal(
   945  			t,
   946  			path.Join(dir, "commitlogs", fmt.Sprintf("commitlog-0-%d.db", i)),
   947  			files[i])
   948  	}
   949  }
   950  
   951  func TestIndexFileSetAt(t *testing.T) {
   952  	dir := createTempDir(t)
   953  	defer os.RemoveAll(dir)
   954  
   955  	var (
   956  		ns1     = ident.StringID("abc")
   957  		now     = xtime.Now().Truncate(time.Hour)
   958  		timeFor = func(n int) xtime.UnixNano { return now.Add(time.Hour * time.Duration(n)) }
   959  	)
   960  
   961  	files := indexFileSetFileIdentifiers{
   962  		indexFileSetFileIdentifier{
   963  			FileSetFileIdentifier: FileSetFileIdentifier{
   964  				BlockStart:         timeFor(1),
   965  				Namespace:          ns1,
   966  				VolumeIndex:        0,
   967  				FileSetContentType: persist.FileSetIndexContentType,
   968  			},
   969  			Suffix: CheckpointFileSuffix,
   970  		},
   971  	}
   972  	files.create(t, dir)
   973  
   974  	results, err := IndexFileSetsAt(dir, ns1, timeFor(1))
   975  	require.NoError(t, err)
   976  	require.Len(t, results, 1)
   977  }
   978  
   979  func TestIndexFileSetAtIgnoresLackOfCheckpoint(t *testing.T) {
   980  	dir := createTempDir(t)
   981  	defer os.RemoveAll(dir)
   982  
   983  	var (
   984  		ns1     = ident.StringID("abc")
   985  		now     = xtime.Now().Truncate(time.Hour)
   986  		timeFor = func(n int) xtime.UnixNano { return now.Add(time.Hour * time.Duration(n)) }
   987  	)
   988  
   989  	files := indexFileSetFileIdentifiers{
   990  		indexFileSetFileIdentifier{
   991  			FileSetFileIdentifier: FileSetFileIdentifier{
   992  				BlockStart:         timeFor(1),
   993  				Namespace:          ns1,
   994  				VolumeIndex:        0,
   995  				FileSetContentType: persist.FileSetIndexContentType,
   996  			},
   997  			Suffix: CheckpointFileSuffix,
   998  		},
   999  		indexFileSetFileIdentifier{
  1000  			FileSetFileIdentifier: FileSetFileIdentifier{
  1001  				BlockStart:         timeFor(2),
  1002  				Namespace:          ns1,
  1003  				VolumeIndex:        0,
  1004  				FileSetContentType: persist.FileSetIndexContentType,
  1005  			},
  1006  			Suffix: InfoFileSuffix,
  1007  		},
  1008  	}
  1009  	files.create(t, dir)
  1010  
  1011  	results, err := IndexFileSetsAt(dir, ns1, timeFor(1))
  1012  	require.NoError(t, err)
  1013  	require.Len(t, results, 1)
  1014  }
  1015  
  1016  func TestIndexFileSetAtMultiple(t *testing.T) {
  1017  	dir := createTempDir(t)
  1018  	defer os.RemoveAll(dir)
  1019  
  1020  	var (
  1021  		ns1     = ident.StringID("abc")
  1022  		now     = xtime.Now().Truncate(time.Hour)
  1023  		timeFor = func(n int) xtime.UnixNano { return now.Add(time.Hour * time.Duration(n)) }
  1024  	)
  1025  
  1026  	files := indexFileSetFileIdentifiers{
  1027  		indexFileSetFileIdentifier{
  1028  			FileSetFileIdentifier: FileSetFileIdentifier{
  1029  				BlockStart:         timeFor(1),
  1030  				Namespace:          ns1,
  1031  				VolumeIndex:        0,
  1032  				FileSetContentType: persist.FileSetIndexContentType,
  1033  			},
  1034  			Suffix: CheckpointFileSuffix,
  1035  		},
  1036  		indexFileSetFileIdentifier{
  1037  			FileSetFileIdentifier: FileSetFileIdentifier{
  1038  				BlockStart:         timeFor(1),
  1039  				Namespace:          ns1,
  1040  				VolumeIndex:        1,
  1041  				FileSetContentType: persist.FileSetIndexContentType,
  1042  			},
  1043  			Suffix: CheckpointFileSuffix,
  1044  		},
  1045  		indexFileSetFileIdentifier{
  1046  			FileSetFileIdentifier: FileSetFileIdentifier{
  1047  				BlockStart:         timeFor(1),
  1048  				Namespace:          ns1,
  1049  				VolumeIndex:        2,
  1050  				FileSetContentType: persist.FileSetIndexContentType,
  1051  			},
  1052  			Suffix: CheckpointFileSuffix,
  1053  		},
  1054  	}
  1055  	files.create(t, dir)
  1056  
  1057  	results, err := IndexFileSetsAt(dir, ns1, timeFor(1))
  1058  	require.NoError(t, err)
  1059  	require.Len(t, results, 3)
  1060  	for i := range files {
  1061  		require.Equal(t, files[i].Namespace.String(), results[i].ID.Namespace.String())
  1062  		require.Equal(t, files[i].BlockStart, results[i].ID.BlockStart)
  1063  		require.Equal(t, files[i].VolumeIndex, results[i].ID.VolumeIndex)
  1064  	}
  1065  }
  1066  
  1067  func TestIndexFileSetsBefore(t *testing.T) {
  1068  	dir := createTempDir(t)
  1069  	defer os.RemoveAll(dir)
  1070  
  1071  	var (
  1072  		ns1     = ident.StringID("abc")
  1073  		now     = xtime.Now().Truncate(time.Hour)
  1074  		timeFor = func(n int) xtime.UnixNano { return now.Add(time.Hour * time.Duration(n)) }
  1075  	)
  1076  
  1077  	files := indexFileSetFileIdentifiers{
  1078  		indexFileSetFileIdentifier{
  1079  			FileSetFileIdentifier: FileSetFileIdentifier{
  1080  				BlockStart:         timeFor(1),
  1081  				Namespace:          ns1,
  1082  				VolumeIndex:        0,
  1083  				FileSetContentType: persist.FileSetIndexContentType,
  1084  			},
  1085  			Suffix: CheckpointFileSuffix,
  1086  		},
  1087  		indexFileSetFileIdentifier{
  1088  			FileSetFileIdentifier: FileSetFileIdentifier{
  1089  				BlockStart:         timeFor(1),
  1090  				Namespace:          ns1,
  1091  				VolumeIndex:        1,
  1092  				FileSetContentType: persist.FileSetIndexContentType,
  1093  			},
  1094  			Suffix: CheckpointFileSuffix,
  1095  		},
  1096  		indexFileSetFileIdentifier{
  1097  			FileSetFileIdentifier: FileSetFileIdentifier{
  1098  				BlockStart:         timeFor(2),
  1099  				Namespace:          ns1,
  1100  				VolumeIndex:        0,
  1101  				FileSetContentType: persist.FileSetIndexContentType,
  1102  			},
  1103  			Suffix: CheckpointFileSuffix,
  1104  		},
  1105  		indexFileSetFileIdentifier{
  1106  			FileSetFileIdentifier: FileSetFileIdentifier{
  1107  				BlockStart:         timeFor(3),
  1108  				Namespace:          ns1,
  1109  				VolumeIndex:        0,
  1110  				FileSetContentType: persist.FileSetIndexContentType,
  1111  			},
  1112  			Suffix: CheckpointFileSuffix,
  1113  		},
  1114  	}
  1115  	files.create(t, dir)
  1116  
  1117  	results, err := IndexFileSetsBefore(dir, ns1, timeFor(3))
  1118  	require.NoError(t, err)
  1119  	require.Len(t, results, 3)
  1120  	for _, res := range results {
  1121  		require.False(t, strings.Contains(res, fmt.Sprintf("%d", timeFor(3))))
  1122  	}
  1123  }
  1124  
  1125  func TestSnapshotFileSnapshotTimeAndID(t *testing.T) {
  1126  	var (
  1127  		dir            = createTempDir(t)
  1128  		filePathPrefix = filepath.Join(dir, "")
  1129  	)
  1130  	defer os.RemoveAll(dir)
  1131  
  1132  	// Write out snapshot file
  1133  	writeOutTestSnapshot(t, filePathPrefix, 0, testWriterStart, 0)
  1134  
  1135  	// Load snapshot files
  1136  	snapshotFiles, err := SnapshotFiles(filePathPrefix, testNs1ID, 0)
  1137  	require.NoError(t, err)
  1138  	require.Equal(t, 1, len(snapshotFiles))
  1139  
  1140  	// Verify SnapshotTimeAndID() returns the expected time
  1141  	snapshotTime, snapshotID, err := SnapshotTimeAndID(filePathPrefix, snapshotFiles[0].ID)
  1142  	require.NoError(t, err)
  1143  	require.Equal(t, true, testWriterStart.Equal(snapshotTime))
  1144  	require.Equal(t, testSnapshotID, snapshotID)
  1145  }
  1146  
  1147  func TestSnapshotFileSnapshotTimeAndIDZeroValue(t *testing.T) {
  1148  	f := FileSetFile{}
  1149  	_, _, err := f.SnapshotTimeAndID()
  1150  	require.Equal(t, errSnapshotTimeAndIDZero, err)
  1151  }
  1152  
  1153  func TestSnapshotFileSnapshotTimeAndIDNotSnapshot(t *testing.T) {
  1154  	f := FileSetFile{}
  1155  	f.AbsoluteFilePaths = []string{"/var/lib/m3db/data/fileset-data.db"}
  1156  	_, _, err := f.SnapshotTimeAndID()
  1157  	require.Error(t, err)
  1158  }
  1159  
  1160  func TestCommitLogFilePath(t *testing.T) {
  1161  	expected := "/var/lib/m3db/commitlogs/commitlog-0-1.db"
  1162  	actual := CommitLogFilePath("/var/lib/m3db", 1)
  1163  	require.Equal(t, expected, actual)
  1164  }
  1165  
  1166  func createTempFile(t *testing.T) *os.File {
  1167  	fd, err := ioutil.TempFile("", "testfile")
  1168  	require.NoError(t, err)
  1169  	return fd
  1170  }
  1171  
  1172  func createTempDir(t *testing.T) string {
  1173  	dir, err := ioutil.TempDir("", "testdir")
  1174  	if err != nil {
  1175  		t.Fatal(err)
  1176  	}
  1177  	return dir
  1178  }
  1179  
  1180  func createFile(t *testing.T, filePath string, b []byte) {
  1181  	fd, err := os.Create(filePath)
  1182  	require.NoError(t, err)
  1183  	if b != nil {
  1184  		fd.Write(b)
  1185  	}
  1186  	fd.Close()
  1187  }
  1188  
// createDataFlushInfoFilesDir writes iter flush info files for the given
// namespace and shard under a fresh temp directory and returns that directory.
func createDataFlushInfoFilesDir(t *testing.T, namespace ident.ID, shard uint32, iter int) string {
	return createDataInfoFiles(t, dataDirName, namespace, shard, iter, false)
}
  1192  
// createDataCheckpointFilesDir writes iter flush checkpoint files for the given
// namespace and shard under a fresh temp directory and returns that directory.
func createDataCheckpointFilesDir(t *testing.T, namespace ident.ID, shard uint32, iter int) string {
	return createDataCheckpointFiles(t, dataDirName, namespace, shard, iter, false)
}
  1196  
// createDataInfoFiles writes iter info files under subDirName for the given
// namespace and shard and returns the containing temp directory.
func createDataInfoFiles(t *testing.T, subDirName string, namespace ident.ID, shard uint32, iter int, isSnapshot bool) string {
	return createDataFiles(t, subDirName, namespace, shard, iter, isSnapshot, InfoFileSuffix)
}
  1200  
// createDataCheckpointFiles writes iter checkpoint files under subDirName for
// the given namespace and shard and returns the containing temp directory.
func createDataCheckpointFiles(t *testing.T, subDirName string, namespace ident.ID, shard uint32, iter int, isSnapshot bool) string {
	return createDataFiles(t, subDirName, namespace, shard, iter, isSnapshot, CheckpointFileSuffix)
}
  1204  
  1205  func createDataFilesWithVolumeIndex(t *testing.T,
  1206  	subDirName string, namespace ident.ID, shard uint32, iter int, isSnapshot bool, fileSuffix string, volumeIndex int,
  1207  ) string {
  1208  	dir := createTempDir(t)
  1209  	shardDir := path.Join(dir, subDirName, namespace.String(), strconv.Itoa(int(shard)))
  1210  	require.NoError(t, os.MkdirAll(shardDir, 0755))
  1211  	for i := 0; i < iter; i++ {
  1212  		ts := xtime.UnixNano(int64(i))
  1213  		var infoFilePath string
  1214  		if isSnapshot {
  1215  			infoFilePath = FilesetPathFromTimeAndIndex(shardDir, ts, volumeIndex, fileSuffix)
  1216  		} else {
  1217  			infoFilePath = filesetPathFromTimeLegacy(shardDir, ts, fileSuffix)
  1218  		}
  1219  		var contents []byte
  1220  		if fileSuffix == CheckpointFileSuffix {
  1221  			// If writing a checkpoint file then write out a checksum of contents
  1222  			// so that when code that validates the checkpoint file runs it returns
  1223  			// successfully
  1224  			digestBuf := digest.NewBuffer()
  1225  			digestBuf.WriteDigest(digest.Checksum(contents))
  1226  			contents = []byte(digestBuf)
  1227  		}
  1228  		createFile(t, infoFilePath, contents)
  1229  	}
  1230  	return dir
  1231  }
  1232  
// createDataFiles writes iter fileset files with the given suffix (always
// volume index 0) under a fresh temp directory and returns that directory.
func createDataFiles(t *testing.T,
	subDirName string, namespace ident.ID, shard uint32, iter int, isSnapshot bool, fileSuffix string,
) string {
	return createDataFilesWithVolumeIndex(t, subDirName, namespace, shard, iter, isSnapshot, fileSuffix, 0)
}
  1238  
// indexFileSetFileIdentifier pairs an index fileset identifier with the
// single file suffix (e.g. info or checkpoint) that should be written for it.
type indexFileSetFileIdentifier struct {
	FileSetFileIdentifier
	Suffix string
}
  1243  
  1244  type indexFileSetFileIdentifiers []indexFileSetFileIdentifier
  1245  
  1246  func (indexFilesets indexFileSetFileIdentifiers) create(t *testing.T, prefixDir string) {
  1247  	for _, fileset := range indexFilesets {
  1248  		idents := fileSetFileIdentifiers{fileset.FileSetFileIdentifier}
  1249  		idents.create(t, prefixDir, persist.FileSetFlushType, fileset.Suffix)
  1250  	}
  1251  }
  1252  
  1253  type fileSetFileIdentifiers []FileSetFileIdentifier
  1254  
  1255  func (filesets fileSetFileIdentifiers) create(t *testing.T, prefixDir string, fileSetType persist.FileSetType, fileSuffixes ...string) {
  1256  	writeFile := func(t *testing.T, path string, contents []byte) {
  1257  		if strings.Contains(path, CheckpointFileSuffix) {
  1258  			// If writing a checkpoint file then write out a checksum of contents
  1259  			// so that when code that validates the checkpoint file runs it returns
  1260  			// successfully
  1261  			digestBuf := digest.NewBuffer()
  1262  			digestBuf.WriteDigest(digest.Checksum(contents))
  1263  			contents = []byte(digestBuf)
  1264  		}
  1265  		createFile(t, path, contents)
  1266  	}
  1267  
  1268  	for _, suffix := range fileSuffixes {
  1269  		for _, fileset := range filesets {
  1270  			switch fileset.FileSetContentType {
  1271  			case persist.FileSetDataContentType:
  1272  				ns := fileset.Namespace.String()
  1273  				shard := fileset.Shard
  1274  				blockStart := fileset.BlockStart
  1275  				shardDir := path.Join(prefixDir, dataDirName, ns, strconv.Itoa(int(shard)))
  1276  				require.NoError(t, os.MkdirAll(shardDir, 0755))
  1277  				var path string
  1278  				switch fileSetType {
  1279  				case persist.FileSetFlushType:
  1280  					path = filesetPathFromTimeLegacy(shardDir, blockStart, suffix)
  1281  					writeFile(t, path, nil)
  1282  				case persist.FileSetSnapshotType:
  1283  					path = FilesetPathFromTimeAndIndex(shardDir, blockStart, 0, fileSuffix)
  1284  					writeFile(t, path, nil)
  1285  				default:
  1286  					panic("unknown FileSetType")
  1287  				}
  1288  			case persist.FileSetIndexContentType:
  1289  				ns := fileset.Namespace.String()
  1290  				blockStart := fileset.BlockStart
  1291  				volumeIndex := fileset.VolumeIndex
  1292  				indexDir := path.Join(prefixDir, indexDirName, dataDirName, ns)
  1293  				require.NoError(t, os.MkdirAll(indexDir, 0755))
  1294  				var path string
  1295  				switch fileSetType {
  1296  				case persist.FileSetFlushType:
  1297  					path = FilesetPathFromTimeAndIndex(indexDir, blockStart, volumeIndex, suffix)
  1298  					writeFile(t, path, nil)
  1299  				case persist.FileSetSnapshotType:
  1300  					fallthrough
  1301  				default:
  1302  					panic("unknown FileSetType")
  1303  				}
  1304  			default:
  1305  				panic("unknown file type")
  1306  			}
  1307  		}
  1308  	}
  1309  }
  1310  
  1311  func createDataFile(t *testing.T, shardDir string, blockStart xtime.UnixNano,
  1312  	suffix string, b []byte) {
  1313  	filePath := filesetPathFromTimeLegacy(shardDir, blockStart, suffix)
  1314  	createFile(t, filePath, b)
  1315  }
  1316  
  1317  func createCommitLogFiles(t *testing.T, iter int) string {
  1318  	dir := createTempDir(t)
  1319  	commitLogsDir := path.Join(dir, commitLogsDirName)
  1320  	assert.NoError(t, os.Mkdir(commitLogsDir, 0755))
  1321  	for i := 0; i < iter; i++ {
  1322  		filePath := CommitLogFilePath(dir, i)
  1323  		fd, err := os.Create(filePath)
  1324  		assert.NoError(t, err)
  1325  		assert.NoError(t, fd.Close())
  1326  	}
  1327  	return dir
  1328  }
  1329  
  1330  //nolint: unparam
  1331  func writeOutTestSnapshot(
  1332  	t *testing.T, filePathPrefix string,
  1333  	shard uint32, blockStart xtime.UnixNano, volume int) {
  1334  	var (
  1335  		entries = []testEntry{
  1336  			{"foo", nil, []byte{1, 2, 3}},
  1337  			{"bar", nil, []byte{4, 5, 6}},
  1338  			{"baz", nil, make([]byte, 65536)},
  1339  			{"cat", nil, make([]byte, 100000)},
  1340  			{"echo", nil, []byte{7, 8, 9}},
  1341  		}
  1342  		w = newTestWriter(t, filePathPrefix)
  1343  	)
  1344  	defer w.Close()
  1345  
  1346  	writeTestDataWithVolume(
  1347  		t, w, shard, blockStart, volume, entries, persist.FileSetSnapshotType)
  1348  }
  1349  
  1350  func mustFileExists(t *testing.T, path string) bool {
  1351  	exists, err := FileExists(path)
  1352  	require.NoError(t, err)
  1353  	return exists
  1354  }