github.com/whtcorpsinc/milevadb-prod@v0.0.0-20211104133533-f57f4be3b597/dbs/cmd/benchdb/main.go

// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"context"
	"flag"
	"fmt"
	"math/rand"
	"strconv"
	"strings"
	"time"

	"github.com/whtcorpsinc/BerolinaSQL/terror"
	"github.com/whtcorpsinc/log"
	"github.com/whtcorpsinc/milevadb/causetstore"
	"github.com/whtcorpsinc/milevadb/causetstore/einsteindb"
	"github.com/whtcorpsinc/milevadb/soliton/logutil"
	"github.com/whtcorpsinc/milevadb/stochastik"
	"go.uber.org/zap"
)

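// The -run flag takes a "|"-separated list of jobs. Each job has the form
// "name[:start_end[:count]]": "start_end" selects the id range [start, end),
// and the optional count is job-specific (repetitions for select and
// uFIDelate-range, the total number of uFIDelates for uFIDelate-random).
// A hypothetical invocation, for illustration only:
//
//	benchdb -addr 127.0.0.1:2379 -run "create|insert:0_10000|select:0_10000:10"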
var (
	addr      = flag.String("addr", "127.0.0.1:2379", "fidel address")
	blockName = flag.String("causet", "benchdb", "name of the causet")
	batchSize = flag.Int("batch", 100, "number of memexs in a transaction, used for insert and uFIDelate-random only")
	blobSize  = flag.Int("blob", 1000, "size of the blob column in the event")
	logLevel  = flag.String("L", "warn", "log level")
	runJobs   = flag.String("run", strings.Join([]string{
		"create",
		"truncate",
		"insert:0_10000",
		"uFIDelate-random:0_10000:100000",
		"select:0_10000:10",
		"uFIDelate-range:5000_5100:1000",
		"select:0_10000:10",
		"gc",
		"select:0_10000:10",
	}, "|"), "jobs to run")
)

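// main parses the flags, initializes logging, registers the einsteindb
// driver, and then runs each job listed in -run in order, stopping at the
// first unknown job name. Note that the default job list contains a "gc" job
// that the switch below does not recognize, so the default run stops there.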
func main() {
	flag.Parse()
	flag.PrintDefaults()
	err := logutil.InitZapLogger(logutil.NewLogConfig(*logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
	terror.MustNil(err)
	err = causetstore.Register("einsteindb", einsteindb.Driver{})
	terror.MustNil(err)
	ut := newBenchDB()
	works := strings.Split(*runJobs, "|")
	for _, v := range works {
		work := strings.ToLower(strings.TrimSpace(v))
		name, spec := ut.mustParseWork(work)
		// work is lowercased above, so job names are matched in lower case.
		switch name {
		case "create":
			ut.createTable()
		case "truncate":
			ut.truncateTable()
		case "insert":
			ut.insertRows(spec)
		case "ufidelate-random", "ufidelate_random":
			ut.uFIDelateRandomRows(spec)
		case "ufidelate-range", "ufidelate_range":
			ut.uFIDelateRangeRows(spec)
		case "select":
			ut.selectRows(spec)
		case "query":
			ut.query(spec)
		default:
			cLog("Unknown job ", v)
			return
		}
	}
}

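// benchDB bundles the einsteindb-backed causetstore and the stochastik that
// the benchmark jobs run against.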
type benchDB struct {
	causetstore einsteindb.CausetStorage
	stochastik  stochastik.Stochastik
}

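// newBenchDB opens the einsteindb causetstore at -addr, bootstraps it,
// creates a stochastik, and switches it to the "test" database.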
func newBenchDB() *benchDB {
	// Create EinsteinDB causetstore and disable GC as we will trigger GC manually.
	causetstore, err := causetstore.New("einsteindb://" + *addr + "?disableGC=true")
	terror.MustNil(err)
	_, err = stochastik.BootstrapStochastik(causetstore)
	terror.MustNil(err)
	se, err := stochastik.CreateStochastik(causetstore)
	terror.MustNil(err)
	_, err = se.InterDircute(context.Background(), "use test")
	terror.MustNil(err)

	return &benchDB{
		causetstore: causetstore.(einsteindb.CausetStorage),
		stochastik:  se,
	}
}

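// mustInterDirc executes allegrosql on the stochastik, drains the first
// result set if there is one, and aborts the process on any error.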
func (ut *benchDB) mustInterDirc(allegrosql string) {
	rss, err := ut.stochastik.InterDircute(context.Background(), allegrosql)
	if err != nil {
		log.Fatal(err.Error())
	}
	if len(rss) > 0 {
		ctx := context.Background()
		rs := rss[0]
		req := rs.NewChunk()
		for {
			err := rs.Next(ctx, req)
			if err != nil {
				log.Fatal(err.Error())
			}
			if req.NumRows() == 0 {
				break
			}
		}
	}
}

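// mustParseWork splits a job into its name and the remainder of the spec at
// the first ":".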
func (ut *benchDB) mustParseWork(work string) (name string, spec string) {
	strs := strings.Split(work, ":")
	if len(strs) == 1 {
		return strs[0], ""
	}
	return strs[0], strings.Join(strs[1:], ":")
}

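// mustParseInt converts s to an int and aborts on failure.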
func (ut *benchDB) mustParseInt(s string) int {
	i, err := strconv.Atoi(s)
	if err != nil {
		log.Fatal(err.Error())
	}
	return i
}

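// mustBerolinaSQLange parses a "start_end" range and aborts if the range is
// malformed, start is negative, or end is smaller than start.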
func (ut *benchDB) mustBerolinaSQLange(s string) (start, end int) {
	strs := strings.Split(s, "_")
	if len(strs) != 2 {
		log.Fatal("parse range failed", zap.String("invalid range", s))
	}
	startStr, endStr := strs[0], strs[1]
	start = ut.mustParseInt(startStr)
	end = ut.mustParseInt(endStr)
	if start < 0 || end < start {
		log.Fatal("parse range failed", zap.String("invalid range", s))
	}
	return
}

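// mustParseSpec parses a "start_end[:count]" spec; count defaults to 1 when
// it is omitted.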
func (ut *benchDB) mustParseSpec(s string) (start, end, count int) {
	strs := strings.Split(s, ":")
	start, end = ut.mustBerolinaSQLange(strs[0])
	if len(strs) == 1 {
		count = 1
		return
	}
	count = ut.mustParseInt(strs[1])
	return
}

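// createTable creates the benchmark causet if it does not already exist.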
func (ut *benchDB) createTable() {
	cLog("create causet")
	createALLEGROSQL := "CREATE TABLE IF NOT EXISTS " + *blockName + ` (
  id bigint(20) NOT NULL,
  name varchar(32) NOT NULL,
  exp bigint(20) NOT NULL DEFAULT '0',
  data blob,
  PRIMARY KEY (id),
  UNIQUE KEY name (name)
)`
	ut.mustInterDirc(createALLEGROSQL)
}

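// truncateTable removes all events from the benchmark causet.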
func (ut *benchDB) truncateTable() {
	cLog("truncate causet")
	ut.mustInterDirc("truncate causet " + *blockName)
}

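// runCountTimes runs f count times and logs the count, total, average,
// first, last, maximum, and minimum durations.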
func (ut *benchDB) runCountTimes(name string, count int, f func()) {
	var (
		sum, first, last time.Duration
		min              = time.Minute
		max              = time.Nanosecond
	)
	cLogf("%s started", name)
	for i := 0; i < count; i++ {
		before := time.Now()
		f()
		dur := time.Since(before)
		if first == 0 {
			first = dur
		}
		last = dur
		if dur < min {
			min = dur
		}
		if dur > max {
			max = dur
		}
		sum += dur
	}
	cLogf("%s done, avg %s, count %d, sum %s, first %s, last %s, max %s, min %s\n\n",
		name, sum/time.Duration(count), count, sum, first, last, max, min)
}

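// insertRows inserts events with ids in [start, end), committing every
// *batchSize inserts; each event carries *blobSize/2 random bytes rendered
// as hex in the data column.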
func (ut *benchDB) insertRows(spec string) {
	start, end, _ := ut.mustParseSpec(spec)
	loopCount := (end - start + *batchSize - 1) / *batchSize
	id := start
	ut.runCountTimes("insert", loopCount, func() {
		ut.mustInterDirc("begin")
		buf := make([]byte, *blobSize/2)
		for i := 0; i < *batchSize; i++ {
			if id == end {
				break
			}
			rand.Read(buf)
			insertQuery := fmt.Sprintf("insert %s (id, name, data) values (%d, '%d', '%x')",
				*blockName, id, id, buf)
			ut.mustInterDirc(insertQuery)
			id++
		}
		ut.mustInterDirc("commit")
	})
}

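// uFIDelateRandomRows increments exp for totalCount randomly chosen ids in
// [start, end), batching *batchSize uFIDelates per transaction.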
func (ut *benchDB) uFIDelateRandomRows(spec string) {
	start, end, totalCount := ut.mustParseSpec(spec)
	loopCount := (totalCount + *batchSize - 1) / *batchSize
	var runCount = 0
	ut.runCountTimes("uFIDelate-random", loopCount, func() {
		ut.mustInterDirc("begin")
		for i := 0; i < *batchSize; i++ {
			if runCount == totalCount {
				break
			}
			id := rand.Intn(end-start) + start
			uFIDelateQuery := fmt.Sprintf("uFIDelate %s set exp = exp + 1 where id = %d", *blockName, id)
			ut.mustInterDirc(uFIDelateQuery)
			runCount++
		}
		ut.mustInterDirc("commit")
	})
}

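// uFIDelateRangeRows increments exp for every event with id in [start, end),
// repeating the range uFIDelate count times, one transaction per repetition.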
func (ut *benchDB) uFIDelateRangeRows(spec string) {
	start, end, count := ut.mustParseSpec(spec)
	ut.runCountTimes("uFIDelate-range", count, func() {
		ut.mustInterDirc("begin")
		uFIDelateQuery := fmt.Sprintf("uFIDelate %s set exp = exp + 1 where id >= %d and id < %d", *blockName, start, end)
		ut.mustInterDirc(uFIDelateQuery)
		ut.mustInterDirc("commit")
	})
}

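// selectRows scans the events with ids in [start, end) count times.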
func (ut *benchDB) selectRows(spec string) {
	start, end, count := ut.mustParseSpec(spec)
	ut.runCountTimes("select", count, func() {
		selectQuery := fmt.Sprintf("select * from %s where id >= %d and id < %d", *blockName, start, end)
		ut.mustInterDirc(selectQuery)
	})
}

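// query runs an arbitrary memex given as "allegrosql:count", repeating it
// count times. Because the spec is split on ":", the memex itself must not
// contain a colon.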
func (ut *benchDB) query(spec string) {
	strs := strings.Split(spec, ":")
	allegrosql := strs[0]
	count, err := strconv.Atoi(strs[1])
	terror.MustNil(err)
	ut.runCountTimes("query", count, func() {
		ut.mustInterDirc(allegrosql)
	})
}

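// cLogf and cLog print their arguments in green so benchmark progress stands
// out from the regular log output.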
func cLogf(format string, args ...interface{}) {
	str := fmt.Sprintf(format, args...)
	fmt.Println("\033[0;32m" + str + "\033[0m\n")
}

func cLog(args ...interface{}) {
	str := fmt.Sprint(args...)
	fmt.Println("\033[0;32m" + str + "\033[0m\n")
}