github.com/m3db/m3@v1.5.0/src/dbnode/persist/fs/writer_benchmark_test.go

// Copyright (c) 2019 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package fs

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"
	"testing"
	"time"

	"github.com/m3db/m3/src/dbnode/persist"
	xsync "github.com/m3db/m3/src/x/sync"
	xtime "github.com/m3db/m3/src/x/time"
)

// Benchmarks were run on a production machine with 32 cores and
// non-server-grade, non-NVMe SSD drives.
// goos: linux
// goarch: amd64
// pkg: github.com/m3db/m3/src/dbnode/persist/fs
// BenchmarkCreateEmptyFilesets/parallelism:_2,_numShards:_1-32         	   10000	    135045 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_2,_numShards:_256-32       	   10000	    124712 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_2,_numShards:_1024-32      	   10000	    149700 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_4,_numShards:_1-32         	   20000	     86291 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_4,_numShards:_256-32       	   20000	     94382 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_4,_numShards:_1024-32      	   20000	    102477 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_8,_numShards:_1-32         	   20000	     62403 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_8,_numShards:_256-32       	   20000	     68515 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_8,_numShards:_1024-32      	   20000	     72531 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_16,_numShards:_1-32        	   30000	     51230 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_16,_numShards:_256-32      	   50000	     41634 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_16,_numShards:_1024-32     	   30000	     48799 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_32,_numShards:_1-32        	   30000	     46718 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_32,_numShards:_256-32      	   50000	     38207 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_32,_numShards:_1024-32     	   30000	     40722 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_64,_numShards:_1-32        	   30000	     42638 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_64,_numShards:_256-32      	   50000	     34545 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_64,_numShards:_1024-32     	   30000	     37479 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_128,_numShards:_1-32       	   30000	     40628 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_128,_numShards:_256-32     	   50000	     34262 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_128,_numShards:_1024-32    	   30000	     37234 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_256,_numShards:_1-32       	   50000	     39045 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_256,_numShards:_256-32     	   50000	     33717 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_256,_numShards:_1024-32    	   30000	     37385 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_512,_numShards:_1-32       	   50000	     38813 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_512,_numShards:_256-32     	   50000	     33760 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_512,_numShards:_1024-32    	   30000	     36175 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_1024,_numShards:_1-32      	   50000	     46628 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_1024,_numShards:_256-32    	   50000	     33590 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_1024,_numShards:_1024-32   	   30000	     34465 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_2048,_numShards:_1-32      	   50000	     40628 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_2048,_numShards:_256-32    	   50000	     31257 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_2048,_numShards:_1024-32   	   30000	     34975 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_4096,_numShards:_1-32      	   30000	     40306 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_4096,_numShards:_256-32    	   50000	     34649 ns/op
// BenchmarkCreateEmptyFilesets/parallelism:_4096,_numShards:_1024-32   	   30000	     38800 ns/op
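//
// Reading the figures above: past roughly 64 workers, per-op latency
// plateaus in the ~31-47us range for every shard count, about a 3-4x
// improvement over parallelism 2. The numbers are indicative only and
// will vary with hardware. Reproduce with, for example:
//   go test -bench BenchmarkCreateEmptyFilesets ./src/dbnode/persist/fs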
func BenchmarkCreateEmptyFilesets(b *testing.B) {
	type benchEmptyFileset struct {
		parallelism int
		numShards   int
	}

	testCases := []benchEmptyFileset{}
	for i := 2; i <= 4096; i *= 2 {
		testCases = append(testCases, benchEmptyFileset{
			parallelism: i,
			numShards:   1,
		})
		testCases = append(testCases, benchEmptyFileset{
			parallelism: i,
			numShards:   256,
		})
		testCases = append(testCases, benchEmptyFileset{
			parallelism: i,
			numShards:   1024,
		})
	}

	for _, tc := range testCases {
		title := fmt.Sprintf(
			"parallelism: %d, numShards: %d",
			tc.parallelism, tc.numShards)

		b.Run(title, func(b *testing.B) {
			benchmarkCreateEmptyFilesets(b, tc.parallelism, tc.numShards)
		})
	}
}

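// benchmarkCreateEmptyFilesets measures the cost of opening and closing b.N
// empty fileset volumes, spreading the writes across numShards shards with a
// worker pool of the given parallelism.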
func benchmarkCreateEmptyFilesets(b *testing.B, parallelism, numShards int) {
	dir, err := ioutil.TempDir("", "testdir")
	if err != nil {
		panic(err)
	}
	filePathPrefix := filepath.Join(dir, "")
	defer os.RemoveAll(dir)

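	// All filesets share a single block-aligned start time.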
	var (
		blockSize = 2 * time.Hour
		start     = xtime.Now().Truncate(blockSize)
	)

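	// The pooled worker pool bounds the number of concurrent fileset
	// writes at the requested parallelism.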
	workerPool, err := xsync.NewPooledWorkerPool(
		parallelism, xsync.NewPooledWorkerPoolOptions())
	if err != nil {
		panic(err)
	}
	workerPool.Init()

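	// Enqueue one empty fileset write per benchmark iteration and wait
	// for all of them to complete before returning.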
	var wg sync.WaitGroup
	for i := 0; i < b.N; i++ {
		wg.Add(1)

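		// Cycle iterations across the shard space; a distinct volume
		// index per iteration keeps each fileset unique on disk.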
		writerOpts := DataWriterOpenOptions{
			Identifier: FileSetFileIdentifier{
				Namespace:   testNs1ID,
				Shard:       uint32(i % numShards),
				BlockStart:  start,
				VolumeIndex: i,
			},
			BlockSize:   testBlockSize,
			FileSetType: persist.FileSetFlushType,
		}

		workerPool.Go(func() {
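			// Each task constructs its own writer, then opens and
			// immediately closes the fileset, leaving an empty fileset
			// volume on disk.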
			writer, err := NewWriter(testDefaultOpts.
				SetFilePathPrefix(filePathPrefix).
				SetWriterBufferSize(testWriterBufferSize))
			if err != nil {
				panic(err)
			}

			if err := writer.Open(writerOpts); err != nil {
				panic(err)
			}
			if err := writer.Close(); err != nil {
				panic(err)
			}
			wg.Done()
		})
	}

	wg.Wait()
}