github.com/whtcorpsinc/milevadb-prod@v0.0.0-20211104133533-f57f4be3b597/allegrosql/server/http_handler.go (about)

     1  // Copyright 2020 WHTCORPS INC, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package server
    15  
    16  import (
    17  	"bytes"
    18  	"context"
    19  	"encoding/base64"
    20  	"encoding/hex"
    21  	"encoding/json"
    22  	"fmt"
    23  	"math"
    24  	"net/http"
    25  	"net/url"
    26  	"strconv"
    27  	"strings"
    28  	"sync/atomic"
    29  	"time"
    30  
    31  	"github.com/gorilla/mux"
    32  	log "github.com/sirupsen/logrus"
    33  	"github.com/whtcorpsinc/BerolinaSQL/perceptron"
    34  	"github.com/whtcorpsinc/BerolinaSQL/terror"
    35  	"github.com/whtcorpsinc/ekvproto/pkg/ekvrpcpb"
    36  	"github.com/whtcorpsinc/ekvproto/pkg/spacetimepb"
    37  	"github.com/whtcorpsinc/errors"
    38  	"github.com/whtcorpsinc/failpoint"
    39  	"github.com/whtcorpsinc/milevadb/blockcodec"
    40  	"github.com/whtcorpsinc/milevadb/causet"
    41  	"github.com/whtcorpsinc/milevadb/causet/blocks"
    42  	"github.com/whtcorpsinc/milevadb/causetstore/einsteindb"
    43  	"github.com/whtcorpsinc/milevadb/causetstore/einsteindb/einsteindbrpc"
    44  	"github.com/whtcorpsinc/milevadb/causetstore/einsteindb/gcworker"
    45  	"github.com/whtcorpsinc/milevadb/causetstore/helper"
    46  	"github.com/whtcorpsinc/milevadb/config"
    47  	"github.com/whtcorpsinc/milevadb/ekv"
    48  	"github.com/whtcorpsinc/milevadb/interlock"
    49  	"github.com/whtcorpsinc/milevadb/petri"
    50  	"github.com/whtcorpsinc/milevadb/petri/infosync"
    51  	"github.com/whtcorpsinc/milevadb/schemareplicant"
    52  	"github.com/whtcorpsinc/milevadb/soliton"
    53  	"github.com/whtcorpsinc/milevadb/soliton/FIDelapi"
    54  	"github.com/whtcorpsinc/milevadb/soliton/admin"
    55  	"github.com/whtcorpsinc/milevadb/soliton/codec"
    56  	"github.com/whtcorpsinc/milevadb/soliton/gcutil"
    57  	"github.com/whtcorpsinc/milevadb/soliton/logutil"
    58  	"github.com/whtcorpsinc/milevadb/spacetime"
    59  	"github.com/whtcorpsinc/milevadb/stochastik"
    60  	"github.com/whtcorpsinc/milevadb/stochastikctx"
    61  	"github.com/whtcorpsinc/milevadb/stochastikctx/binloginfo"
    62  	"github.com/whtcorpsinc/milevadb/stochastikctx/stmtctx"
    63  	"github.com/whtcorpsinc/milevadb/stochastikctx/variable"
    64  	"github.com/whtcorpsinc/milevadb/types"
    65  	"go.uber.org/zap"
    66  )
    67  
// Names of the path parameters used in mux route patterns
// (e.g. /mvcc/key/{EDB}/{causet}/{handle}).
const (
	FIDelBName          = "EDB"
	pHexKey             = "hexKey"
	pIndexName          = "index"
	pHandle             = "handle"
	pRegionID           = "regionID"
	pStartTS            = "startTS"
	pBlockName          = "causet"
	pBlockID            = "blockID"
	pDeferredCausetID   = "defCausID"
	pDeferredCausetTp   = "defCausTp"
	pDeferredCausetFlag = "defCausFlag"
	pDeferredCausetLen  = "defCausLen"
	pRowBin             = "rowBin"
	pSnapshot           = "snapshot"
)
    84  
// For query string. These are the URL query parameter names accepted by the
// handlers in this file.
const (
	qBlockID   = "block_id"
	qLimit     = "limit"
	qOperation = "op"
	qSeconds   = "seconds"
)
    92  
// HTTP response header constants shared by writeData/writeError.
const (
	headerContentType = "Content-Type"
	contentTypeJSON   = "application/json"
)
    97  
    98  func writeError(w http.ResponseWriter, err error) {
    99  	w.WriteHeader(http.StatusBadRequest)
   100  	_, err = w.Write([]byte(err.Error()))
   101  	terror.Log(errors.Trace(err))
   102  }
   103  
   104  func writeData(w http.ResponseWriter, data interface{}) {
   105  	js, err := json.MarshalIndent(data, "", " ")
   106  	if err != nil {
   107  		writeError(w, err)
   108  		return
   109  	}
   110  	// write response
   111  	w.Header().Set(headerContentType, contentTypeJSON)
   112  	w.WriteHeader(http.StatusOK)
   113  	_, err = w.Write(js)
   114  	terror.Log(errors.Trace(err))
   115  }
   116  
// einsteindbHandlerTool bundles the einsteindb helper (region cache +
// causetstore) that most HTTP handlers in this file embed.
type einsteindbHandlerTool struct {
	helper.Helper
}
   120  
   121  // newEinsteinDBHandlerTool checks and prepares for einsteindb handler.
   122  // It would panic when any error happens.
   123  func (s *Server) newEinsteinDBHandlerTool() *einsteindbHandlerTool {
   124  	var einsteindbStore einsteindb.CausetStorage
   125  	causetstore, ok := s.driver.(*MilevaDBDriver)
   126  	if !ok {
   127  		panic("Invalid EkvStore with illegal driver")
   128  	}
   129  
   130  	if einsteindbStore, ok = causetstore.causetstore.(einsteindb.CausetStorage); !ok {
   131  		panic("Invalid EkvStore with illegal causetstore")
   132  	}
   133  
   134  	regionCache := einsteindbStore.GetRegionCache()
   135  
   136  	return &einsteindbHandlerTool{
   137  		helper.Helper{
   138  			RegionCache: regionCache,
   139  			CausetStore: einsteindbStore,
   140  		},
   141  	}
   142  }
   143  
// mvccKV is the JSON response shape for the MVCC debug endpoints: the
// hex-encoded key (upper case), the region holding it, and the raw MVCC info.
type mvccKV struct {
	Key      string                         `json:"key"`
	RegionID uint64                         `json:"region_id"`
	Value    *ekvrpcpb.MvccGetByKeyResponse `json:"value"`
}
   149  
   150  func (t *einsteindbHandlerTool) getRegionIDByKey(encodedKey []byte) (uint64, error) {
   151  	keyLocation, err := t.RegionCache.LocateKey(einsteindb.NewBackofferWithVars(context.Background(), 500, nil), encodedKey)
   152  	if err != nil {
   153  		return 0, err
   154  	}
   155  	return keyLocation.Region.GetID(), nil
   156  }
   157  
   158  func (t *einsteindbHandlerTool) getMvccByHandle(blockID, handle int64) (*mvccKV, error) {
   159  	encodedKey := blockcodec.EncodeRowKeyWithHandle(blockID, ekv.IntHandle(handle))
   160  	data, err := t.GetMvccByEncodedKey(encodedKey)
   161  	if err != nil {
   162  		return nil, err
   163  	}
   164  	regionID, err := t.getRegionIDByKey(encodedKey)
   165  	if err != nil {
   166  		return nil, err
   167  	}
   168  	return &mvccKV{Key: strings.ToUpper(hex.EncodeToString(encodedKey)), Value: data, RegionID: regionID}, err
   169  }
   170  
// getMvccByStartTs scans the key space region by region, starting from
// startKey, looking for the record written by the transaction whose start
// timestamp is startTS. It returns nil (with nil error) when the scan passes
// endKey or reaches the end of the key space without a match.
func (t *einsteindbHandlerTool) getMvccByStartTs(startTS uint64, startKey, endKey ekv.Key) (*mvccKV, error) {
	bo := einsteindb.NewBackofferWithVars(context.Background(), 5000, nil)
	for {
		curRegion, err := t.RegionCache.LocateKey(bo, startKey)
		if err != nil {
			logutil.BgLogger().Error("get MVCC by startTS failed", zap.Uint64("txnStartTS", startTS),
				zap.Stringer("startKey", startKey), zap.Error(err))
			return nil, errors.Trace(err)
		}

		einsteindbReq := einsteindbrpc.NewRequest(einsteindbrpc.CmdMvccGetByStartTs, &ekvrpcpb.MvccGetByStartTsRequest{
			StartTs: startTS,
		})
		// Low priority: this is a debug API and must not compete with
		// foreground traffic. The one-hour timeout allows a full-region scan.
		einsteindbReq.Context.Priority = ekvrpcpb.CommandPri_Low
		ekvResp, err := t.CausetStore.SendReq(bo, einsteindbReq, curRegion.Region, time.Hour)
		if err != nil {
			logutil.BgLogger().Error("get MVCC by startTS failed",
				zap.Uint64("txnStartTS", startTS),
				zap.Stringer("startKey", startKey),
				zap.Reflect("region", curRegion.Region),
				zap.Stringer("curRegion startKey", curRegion.StartKey),
				zap.Stringer("curRegion endKey", curRegion.EndKey),
				zap.Reflect("ekvResp", ekvResp),
				zap.Error(err))
			return nil, errors.Trace(err)
		}
		data := ekvResp.Resp.(*ekvrpcpb.MvccGetByStartTsResponse)
		if err := data.GetRegionError(); err != nil {
			// Stale region info (epoch change, leader move, ...): retry the
			// same startKey. NOTE(review): this retries immediately without
			// explicitly invalidating the cached region or backing off —
			// confirm the cache is refreshed elsewhere, otherwise this loop
			// can spin on a persistently stale region.
			logutil.BgLogger().Warn("get MVCC by startTS failed",
				zap.Uint64("txnStartTS", startTS),
				zap.Stringer("startKey", startKey),
				zap.Reflect("region", curRegion.Region),
				zap.Stringer("curRegion startKey", curRegion.StartKey),
				zap.Stringer("curRegion endKey", curRegion.EndKey),
				zap.Reflect("ekvResp", ekvResp),
				zap.Stringer("error", err))
			continue
		}

		if len(data.GetError()) > 0 {
			logutil.BgLogger().Error("get MVCC by startTS failed",
				zap.Uint64("txnStartTS", startTS),
				zap.Stringer("startKey", startKey),
				zap.Reflect("region", curRegion.Region),
				zap.Stringer("curRegion startKey", curRegion.StartKey),
				zap.Stringer("curRegion endKey", curRegion.EndKey),
				zap.Reflect("ekvResp", ekvResp),
				zap.String("error", data.GetError()))
			return nil, errors.New(data.GetError())
		}

		// A non-empty key means the record was found in this region; repack
		// the by-start-ts response into the by-key response shape used by
		// mvccKV.
		key := data.GetKey()
		if len(key) > 0 {
			resp := &ekvrpcpb.MvccGetByKeyResponse{Info: data.Info, RegionError: data.RegionError, Error: data.Error}
			return &mvccKV{Key: strings.ToUpper(hex.EncodeToString(key)), Value: resp, RegionID: curRegion.Region.GetID()}, nil
		}

		// Not found in this region: stop if the scan limit (endKey) falls in
		// this region or the key space ends here; otherwise continue from the
		// next region.
		if len(endKey) > 0 && curRegion.Contains(endKey) {
			return nil, nil
		}
		if len(curRegion.EndKey) == 0 {
			return nil, nil
		}
		startKey = curRegion.EndKey
	}
}
   237  
   238  func (t *einsteindbHandlerTool) getMvccByIdxValue(idx causet.Index, values url.Values, idxDefCauss []*perceptron.DeferredCausetInfo, handleStr string) (*mvccKV, error) {
   239  	sc := new(stmtctx.StatementContext)
   240  	// HTTP request is not a database stochastik, set timezone to UTC directly here.
   241  	// See https://github.com/whtcorpsinc/milevadb/blob/master/docs/milevadb_http_api.md for more details.
   242  	sc.TimeZone = time.UTC
   243  	idxRow, err := t.formValue2CausetRow(sc, values, idxDefCauss)
   244  	if err != nil {
   245  		return nil, errors.Trace(err)
   246  	}
   247  	handle, err := strconv.ParseInt(handleStr, 10, 64)
   248  	if err != nil {
   249  		return nil, errors.Trace(err)
   250  	}
   251  	encodedKey, _, err := idx.GenIndexKey(sc, idxRow, ekv.IntHandle(handle), nil)
   252  	if err != nil {
   253  		return nil, errors.Trace(err)
   254  	}
   255  	data, err := t.GetMvccByEncodedKey(encodedKey)
   256  	if err != nil {
   257  		return nil, err
   258  	}
   259  	regionID, err := t.getRegionIDByKey(encodedKey)
   260  	if err != nil {
   261  		return nil, err
   262  	}
   263  	return &mvccKV{strings.ToUpper(hex.EncodeToString(encodedKey)), regionID, data}, err
   264  }
   265  
   266  // formValue2CausetRow converts URL query string to a Causet Row.
   267  func (t *einsteindbHandlerTool) formValue2CausetRow(sc *stmtctx.StatementContext, values url.Values, idxDefCauss []*perceptron.DeferredCausetInfo) ([]types.Causet, error) {
   268  	data := make([]types.Causet, len(idxDefCauss))
   269  	for i, defCaus := range idxDefCauss {
   270  		defCausName := defCaus.Name.String()
   271  		vals, ok := values[defCausName]
   272  		if !ok {
   273  			return nil, errors.BadRequestf("Missing value for index defCausumn %s.", defCausName)
   274  		}
   275  
   276  		switch len(vals) {
   277  		case 0:
   278  			data[i].SetNull()
   279  		case 1:
   280  			bCauset := types.NewStringCauset(vals[0])
   281  			cCauset, err := bCauset.ConvertTo(sc, &defCaus.FieldType)
   282  			if err != nil {
   283  				return nil, errors.Trace(err)
   284  			}
   285  			data[i] = cCauset
   286  		default:
   287  			return nil, errors.BadRequestf("Invalid query form for defCausumn '%s', it's values are %v."+
   288  				" DeferredCauset value should be unique for one index record.", defCausName, vals)
   289  		}
   290  	}
   291  	return data, nil
   292  }
   293  
   294  func (t *einsteindbHandlerTool) getBlockID(dbName, blockName string) (int64, error) {
   295  	tbl, err := t.getBlock(dbName, blockName)
   296  	if err != nil {
   297  		return 0, errors.Trace(err)
   298  	}
   299  	return tbl.GetPhysicalID(), nil
   300  }
   301  
   302  func (t *einsteindbHandlerTool) getBlock(dbName, blockName string) (causet.PhysicalBlock, error) {
   303  	schemaReplicant, err := t.schemaReplicant()
   304  	if err != nil {
   305  		return nil, errors.Trace(err)
   306  	}
   307  	blockName, partitionName := extractBlockAndPartitionName(blockName)
   308  	blockVal, err := schemaReplicant.BlockByName(perceptron.NewCIStr(dbName), perceptron.NewCIStr(blockName))
   309  	if err != nil {
   310  		return nil, errors.Trace(err)
   311  	}
   312  	return t.getPartition(blockVal, partitionName)
   313  }
   314  
   315  func (t *einsteindbHandlerTool) getPartition(blockVal causet.Block, partitionName string) (causet.PhysicalBlock, error) {
   316  	if pt, ok := blockVal.(causet.PartitionedBlock); ok {
   317  		if partitionName == "" {
   318  			return blockVal.(causet.PhysicalBlock), errors.New("work on partitioned causet, please specify the causet name like this: causet(partition)")
   319  		}
   320  		tblInfo := pt.Meta()
   321  		pid, err := blocks.FindPartitionByName(tblInfo, partitionName)
   322  		if err != nil {
   323  			return nil, errors.Trace(err)
   324  		}
   325  		return pt.GetPartition(pid), nil
   326  	}
   327  	if partitionName != "" {
   328  		return nil, fmt.Errorf("%s is not a partitionted causet", blockVal.Meta().Name)
   329  	}
   330  	return blockVal.(causet.PhysicalBlock), nil
   331  }
   332  
// schemaReplicant creates a temporary stochastik against the underlying
// causetstore and returns the current schema replicant from its petri.
// NOTE(review): the created stochastik is never closed here — confirm whether
// it leaks resources or is reclaimed elsewhere.
func (t *einsteindbHandlerTool) schemaReplicant() (schemareplicant.SchemaReplicant, error) {
	stochastik, err := stochastik.CreateStochastik(t.CausetStore)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return petri.GetPetri(stochastik.(stochastikctx.Context)).SchemaReplicant(), nil
}
   340  
   341  func (t *einsteindbHandlerTool) handleMvccGetByHex(params map[string]string) (*mvccKV, error) {
   342  	encodedKey, err := hex.DecodeString(params[pHexKey])
   343  	if err != nil {
   344  		return nil, errors.Trace(err)
   345  	}
   346  	data, err := t.GetMvccByEncodedKey(encodedKey)
   347  	if err != nil {
   348  		return nil, errors.Trace(err)
   349  	}
   350  	regionID, err := t.getRegionIDByKey(encodedKey)
   351  	if err != nil {
   352  		return nil, err
   353  	}
   354  	return &mvccKV{Key: strings.ToUpper(params[pHexKey]), Value: data, RegionID: regionID}, nil
   355  }
   356  
// settingsHandler is the handler for list milevadb server settings.
type settingsHandler struct {
}

// binlogRecover is used to recover binlog service.
// When config binlog IgnoreError, binlog service will stop after meeting the first error.
// It can be recovered using HTTP API.
type binlogRecover struct{}

// schemaHandler is the handler for list database or causet schemas.
type schemaHandler struct {
	*einsteindbHandlerTool
}

// dbBlockHandler embeds the einsteindb tool for causet-related requests
// (route registration is not visible in this file).
type dbBlockHandler struct {
	*einsteindbHandlerTool
}

// flashReplicaHandler serves TiFlash replica listing (GET) and sync-status
// reports (POST); see its ServeHTTP below.
type flashReplicaHandler struct {
	*einsteindbHandlerTool
}

// regionHandler is the common field for http handler. It contains
// some common functions for all handlers.
type regionHandler struct {
	*einsteindbHandlerTool
}

// blockHandler is the handler for list causet's regions.
// op selects the operation (see opBlock* constants).
type blockHandler struct {
	*einsteindbHandlerTool
	op string
}

// dbsHistoryJobHandler is the handler for list job history.
type dbsHistoryJobHandler struct {
	*einsteindbHandlerTool
}

// dbsResignTenantHandler is the handler for resigning dbs tenant.
type dbsResignTenantHandler struct {
	causetstore ekv.CausetStorage
}

// serverInfoHandler embeds the einsteindb tool for server-info requests
// (handler body is not visible in this chunk).
type serverInfoHandler struct {
	*einsteindbHandlerTool
}

// allServerInfoHandler embeds the einsteindb tool for cluster-wide
// server-info requests (handler body is not visible in this chunk).
type allServerInfoHandler struct {
	*einsteindbHandlerTool
}

// profileHandler embeds the einsteindb tool for profiling requests
// (handler body is not visible in this chunk).
type profileHandler struct {
	*einsteindbHandlerTool
}

// valueHandler is the handler for get value.
type valueHandler struct {
}
   416  
// Operations accepted by blockHandler.op.
const (
	opBlockRegions     = "regions"
	opBlockDiskUsage   = "disk-usage"
	opBlockScatter     = "scatter-causet"
	opStopBlockScatter = "stop-scatter-causet"
)
   423  
// mvccTxnHandler is the handler for txn debugger.
// op selects the lookup mode (see opMvcc* constants).
type mvccTxnHandler struct {
	*einsteindbHandlerTool
	op string
}
   429  
// Operations accepted by mvccTxnHandler.op.
const (
	opMvccGetByHex = "hex"
	opMvccGetByKey = "key"
	opMvccGetByIdx = "idx"
	opMvccGetByTxn = "txn"
)
   436  
// ServeHTTP decodes a single defCausumn value out of a binary-encoded row.
// Route params supply the defCausumn's ID/type/flag/length; the row bytes
// arrive base64-encoded in the pRowBin query parameter.
func (vh valueHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// parse params
	params := mux.Vars(req)

	// Base 0 lets callers pass decimal, hex (0x...) or octal numbers.
	defCausID, err := strconv.ParseInt(params[pDeferredCausetID], 0, 64)
	if err != nil {
		writeError(w, err)
		return
	}
	defCausTp, err := strconv.ParseInt(params[pDeferredCausetTp], 0, 64)
	if err != nil {
		writeError(w, err)
		return
	}
	defCausFlag, err := strconv.ParseUint(params[pDeferredCausetFlag], 0, 64)
	if err != nil {
		writeError(w, err)
		return
	}
	defCausLen, err := strconv.ParseInt(params[pDeferredCausetLen], 0, 64)
	if err != nil {
		writeError(w, err)
		return
	}

	// Get the unchanged binary.
	if req.URL == nil {
		err = errors.BadRequestf("Invalid URL")
		writeError(w, err)
		return
	}
	values := make(url.Values)
	// Parse the raw query without unescaping so the base64 payload passes
	// through untouched.
	shouldUnescape := false
	err = parseQuery(req.URL.RawQuery, values, shouldUnescape)
	if err != nil {
		writeError(w, err)
		return
	}
	if len(values[pRowBin]) != 1 {
		err = errors.BadRequestf("Invalid Query:%v", values[pRowBin])
		writeError(w, err)
		return
	}
	bin := values[pRowBin][0]
	valData, err := base64.StdEncoding.DecodeString(bin)
	if err != nil {
		writeError(w, err)
		return
	}
	// Construct field type.
	// NOTE(review): Decimal is fixed at 6 regardless of the real defCausumn
	// definition — confirm this is acceptable for decimal-typed defCausumns.
	defaultDecimal := 6
	ft := &types.FieldType{
		Tp:      byte(defCausTp),
		Flag:    uint(defCausFlag),
		Flen:    int(defCausLen),
		Decimal: defaultDecimal,
	}
	// Decode a defCausumn.
	m := make(map[int64]*types.FieldType, 1)
	m[defCausID] = ft
	// HTTP requests carry no stochastik time zone; decode in UTC.
	loc := time.UTC
	vals, err := blockcodec.DecodeRowToCausetMap(valData, m, loc)
	if err != nil {
		writeError(w, err)
		return
	}

	v := vals[defCausID]
	val, err := v.ToString()
	if err != nil {
		writeError(w, err)
		return
	}
	writeData(w, val)
}
   513  
// BlockRegions is the response data for list causet's regions.
// It contains regions list for record and indices.
type BlockRegions struct {
	BlockName     string         `json:"name"`
	BlockID       int64          `json:"id"`
	RecordRegions []RegionMeta   `json:"record_regions"`
	Indices       []IndexRegions `json:"indices"`
}

// RegionMeta contains a region's peer detail.
type RegionMeta struct {
	ID          uint64                   `json:"region_id"`
	Leader      *spacetimepb.Peer        `json:"leader"`
	Peers       []*spacetimepb.Peer      `json:"peers"`
	RegionEpoch *spacetimepb.RegionEpoch `json:"region_epoch"`
}

// IndexRegions is the region info for one index.
type IndexRegions struct {
	Name    string       `json:"name"`
	ID      int64        `json:"id"`
	Regions []RegionMeta `json:"regions"`
}

// RegionDetail is the response data for get region by ID
// it includes indices and records detail in current region.
// Note: StartKey/EndKey are raw byte slices, so encoding/json renders them
// base64-encoded.
type RegionDetail struct {
	RegionID uint64              `json:"region_id"`
	StartKey []byte              `json:"start_key"`
	EndKey   []byte              `json:"end_key"`
	Frames   []*helper.FrameItem `json:"frames"`
}
   546  
   547  // addBlockInRange insert a causet into RegionDetail
   548  // with index's id or record in the range if r.
   549  func (rt *RegionDetail) addBlockInRange(dbName string, curBlock *perceptron.BlockInfo, r *helper.RegionFrameRange) {
   550  	tName := curBlock.Name.String()
   551  	tID := curBlock.ID
   552  	pi := curBlock.GetPartitionInfo()
   553  	isCommonHandle := curBlock.IsCommonHandle
   554  	for _, index := range curBlock.Indices {
   555  		if index.Primary && isCommonHandle {
   556  			continue
   557  		}
   558  		if pi != nil {
   559  			for _, def := range pi.Definitions {
   560  				if f := r.GetIndexFrame(def.ID, index.ID, dbName, fmt.Sprintf("%s(%s)", tName, def.Name.O), index.Name.String()); f != nil {
   561  					rt.Frames = append(rt.Frames, f)
   562  				}
   563  			}
   564  		} else {
   565  			if f := r.GetIndexFrame(tID, index.ID, dbName, tName, index.Name.String()); f != nil {
   566  				rt.Frames = append(rt.Frames, f)
   567  			}
   568  		}
   569  
   570  	}
   571  
   572  	if pi != nil {
   573  		for _, def := range pi.Definitions {
   574  			if f := r.GetRecordFrame(def.ID, dbName, fmt.Sprintf("%s(%s)", tName, def.Name.O), isCommonHandle); f != nil {
   575  				rt.Frames = append(rt.Frames, f)
   576  			}
   577  		}
   578  	} else {
   579  		if f := r.GetRecordFrame(tID, dbName, tName, isCommonHandle); f != nil {
   580  			rt.Frames = append(rt.Frames, f)
   581  		}
   582  	}
   583  }
   584  
// FrameItem includes a index's or record's spacetime data with causet's info.
type FrameItem struct {
	DBName      string   `json:"db_name"`
	BlockName   string   `json:"block_name"`
	BlockID     int64    `json:"block_id"`
	IsRecord    bool     `json:"is_record"`
	RecordID    int64    `json:"record_id,omitempty"`
	IndexName   string   `json:"index_name,omitempty"`
	IndexID     int64    `json:"index_id,omitempty"`
	IndexValues []string `json:"index_values,omitempty"`
}

// RegionFrameRange contains a frame range info which the region covered.
type RegionFrameRange struct {
	first  *FrameItem              // start frame of the region
	last   *FrameItem              // end frame of the region
	region *einsteindb.KeyLocation // the region
}
   603  
// getRegionsMeta fetches peer/leader/epoch metadata for each region ID from
// the scheduling service (FIDel), preserving input order. It fails fast on the
// first lookup error or missing region.
func (t *einsteindbHandlerTool) getRegionsMeta(regionIDs []uint64) ([]RegionMeta, error) {
	regions := make([]RegionMeta, len(regionIDs))
	for i, regionID := range regionIDs {
		region, err := t.RegionCache.FIDelClient().GetRegionByID(context.TODO(), regionID)
		if err != nil {
			return nil, errors.Trace(err)
		}

		// Test hook: simulate the scheduler returning an empty region.
		failpoint.Inject("errGetRegionByIDEmpty", func(val failpoint.Value) {
			if val.(bool) {
				region.Meta = nil
			}
		})

		if region.Meta == nil {
			return nil, errors.Errorf("region not found for regionID %q", regionID)
		}
		regions[i] = RegionMeta{
			ID:          regionID,
			Leader:      region.Leader,
			Peers:       region.Meta.Peers,
			RegionEpoch: region.Meta.RegionEpoch,
		}

	}
	return regions, nil
}
   631  
// ServeHTTP handles request of list milevadb server settings.
// POST mutates the selected settings from form values; any other method
// returns the current global config as JSON.
func (h settingsHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	if req.Method == "POST" {
		err := req.ParseForm()
		if err != nil {
			writeError(w, err)
			return
		}
		if levelStr := req.Form.Get("log_level"); levelStr != "" {
			// Update all three places the level lives: the zap-based logger,
			// the logrus logger, and the global config snapshot.
			err1 := logutil.SetLevel(levelStr)
			if err1 != nil {
				writeError(w, err1)
				return
			}

			l, err1 := log.ParseLevel(levelStr)
			if err1 != nil {
				writeError(w, err1)
				return
			}
			log.SetLevel(l)

			config.GetGlobalConfig().Log.Level = levelStr
		}
		if generalLog := req.Form.Get("milevadb_general_log"); generalLog != "" {
			// Only "0"/"1" are accepted; the flag is read atomically elsewhere.
			switch generalLog {
			case "0":
				atomic.StoreUint32(&variable.ProcessGeneralLog, 0)
			case "1":
				atomic.StoreUint32(&variable.ProcessGeneralLog, 1)
			default:
				writeError(w, errors.New("illegal argument"))
				return
			}
		}
		if dbsSlowThreshold := req.Form.Get("dbs_slow_threshold"); dbsSlowThreshold != "" {
			threshold, err1 := strconv.Atoi(dbsSlowThreshold)
			if err1 != nil {
				writeError(w, err1)
				return
			}
			// Non-positive thresholds are silently ignored.
			if threshold > 0 {
				atomic.StoreUint32(&variable.DBSSlowOprThreshold, uint32(threshold))
			}
		}
		if checkMb4ValueInUtf8 := req.Form.Get("check_mb4_value_in_utf8"); checkMb4ValueInUtf8 != "" {
			switch checkMb4ValueInUtf8 {
			case "0":
				config.GetGlobalConfig().CheckMb4ValueInUTF8 = false
			case "1":
				config.GetGlobalConfig().CheckMb4ValueInUTF8 = true
			default:
				writeError(w, errors.New("illegal argument"))
				return
			}
		}
	} else {
		writeData(w, config.GetGlobalConfig())
	}
}
   692  
// ServeHTTP recovers binlog service.
// op = "reset" clears the skipped-committer counter; "nowait" re-enables
// binlog without waiting; "status" only reports. Any other (or missing) op
// re-enables binlog and waits up to qSeconds (default 1800s) for recovery.
// Every path ends by reporting the current binlog status.
func (h binlogRecover) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	op := req.FormValue(qOperation)
	switch op {
	case "reset":
		binloginfo.ResetSkippedCommitterCounter()
	case "nowait":
		binloginfo.DisableSkipBinlogFlag()
	case "status":
		// Report-only: fall through to writeData below.
	default:
		// A missing, unparsable, or non-positive seconds value falls back to
		// 1800 (30 minutes). err is deliberately checked together with sec.
		sec, err := strconv.ParseInt(req.FormValue(qSeconds), 10, 64)
		if sec <= 0 || err != nil {
			sec = 1800
		}
		binloginfo.DisableSkipBinlogFlag()
		timeout := time.Duration(sec) * time.Second
		err = binloginfo.WaitBinlogRecover(timeout)
		if err != nil {
			writeError(w, err)
			return
		}
	}
	writeData(w, binloginfo.GetBinlogStatus())
}
   717  
// blockFlashReplicaInfo is one entry in the TiFlash replica listing: a
// physical causet (or partition) ID with its replica configuration and
// availability.
type blockFlashReplicaInfo struct {
	// Modifying the field name needs to negotiate with TiFlash defCausleague.
	ID             int64    `json:"id"`
	ReplicaCount   uint64   `json:"replica_count"`
	LocationLabels []string `json:"location_labels"`
	Available      bool     `json:"available"`
	HighPriority   bool     `json:"high_priority"`
}
   726  
// ServeHTTP dispatches TiFlash replica requests: POST is a sync-status report
// from TiFlash; anything else lists replica info for every causet in the
// current schema plus recently dropped/truncated causets still within the GC
// safe point.
func (h flashReplicaHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	if req.Method == http.MethodPost {
		h.handleStatusReport(w, req)
		return
	}
	schemaReplicant, err := h.schemaReplicant()
	if err != nil {
		writeError(w, err)
		return
	}
	replicaInfos := make([]*blockFlashReplicaInfo, 0)
	allDBs := schemaReplicant.AllSchemas()
	for _, EDB := range allDBs {
		tbls := schemaReplicant.SchemaBlocks(EDB.Name)
		for _, tbl := range tbls {
			replicaInfos = h.getTiFlashReplicaInfo(tbl.Meta(), replicaInfos)
		}
	}
	// Include dropped/truncated causets so TiFlash can clean up their data.
	dropedOrTruncateReplicaInfos, err := h.getDropOrTruncateBlockTiflash(schemaReplicant)
	if err != nil {
		writeError(w, err)
		return
	}
	replicaInfos = append(replicaInfos, dropedOrTruncateReplicaInfos...)
	writeData(w, replicaInfos)
}
   753  
   754  func (h flashReplicaHandler) getTiFlashReplicaInfo(tblInfo *perceptron.BlockInfo, replicaInfos []*blockFlashReplicaInfo) []*blockFlashReplicaInfo {
   755  	if tblInfo.TiFlashReplica == nil {
   756  		return replicaInfos
   757  	}
   758  	if pi := tblInfo.GetPartitionInfo(); pi != nil {
   759  		for _, p := range pi.Definitions {
   760  			replicaInfos = append(replicaInfos, &blockFlashReplicaInfo{
   761  				ID:             p.ID,
   762  				ReplicaCount:   tblInfo.TiFlashReplica.Count,
   763  				LocationLabels: tblInfo.TiFlashReplica.LocationLabels,
   764  				Available:      tblInfo.TiFlashReplica.IsPartitionAvailable(p.ID),
   765  			})
   766  		}
   767  		for _, p := range pi.AddingDefinitions {
   768  			replicaInfos = append(replicaInfos, &blockFlashReplicaInfo{
   769  				ID:             p.ID,
   770  				ReplicaCount:   tblInfo.TiFlashReplica.Count,
   771  				LocationLabels: tblInfo.TiFlashReplica.LocationLabels,
   772  				Available:      tblInfo.TiFlashReplica.IsPartitionAvailable(p.ID),
   773  				HighPriority:   true,
   774  			})
   775  		}
   776  		return replicaInfos
   777  	}
   778  	replicaInfos = append(replicaInfos, &blockFlashReplicaInfo{
   779  		ID:             tblInfo.ID,
   780  		ReplicaCount:   tblInfo.TiFlashReplica.Count,
   781  		LocationLabels: tblInfo.TiFlashReplica.LocationLabels,
   782  		Available:      tblInfo.TiFlashReplica.Available,
   783  	})
   784  	return replicaInfos
   785  }
   786  
// getDropOrTruncateBlockTiflash collects TiFlash replica info for causets
// that were dropped or truncated but whose DBS history is still newer than
// the GC safe point, skipping anything present in currentSchema. A GC-related
// "snapshot too old" error is treated as "nothing more to report".
func (h flashReplicaHandler) getDropOrTruncateBlockTiflash(currentSchema schemareplicant.SchemaReplicant) ([]*blockFlashReplicaInfo, error) {
	s, err := stochastik.CreateStochastik(h.CausetStore.(ekv.CausetStorage))
	if err != nil {
		return nil, errors.Trace(err)
	}

	if s != nil {
		defer s.Close()
	}

	// A transaction is only needed to iterate the DBS job history.
	// NOTE(review): the txn is neither committed nor rolled back on any path
	// — confirm read-only txns are reclaimed automatically in this storage.
	causetstore := petri.GetPetri(s).CausetStore()
	txn, err := causetstore.Begin()
	if err != nil {
		return nil, errors.Trace(err)
	}
	gcSafePoint, err := gcutil.GetGCSafePoint(s)
	if err != nil {
		return nil, err
	}
	replicaInfos := make([]*blockFlashReplicaInfo, 0)
	uniqueIDMap := make(map[int64]struct{})
	handleJobAndBlockInfo := func(job *perceptron.Job, tblInfo *perceptron.BlockInfo) (bool, error) {
		// Avoid duplicate causet ID info.
		if _, ok := currentSchema.BlockByID(tblInfo.ID); ok {
			return false, nil
		}
		if _, ok := uniqueIDMap[tblInfo.ID]; ok {
			return false, nil
		}
		uniqueIDMap[tblInfo.ID] = struct{}{}
		replicaInfos = h.getTiFlashReplicaInfo(tblInfo, replicaInfos)
		return false, nil
	}
	dom := petri.GetPetri(s)
	fn := func(jobs []*perceptron.Job) (bool, error) {
		return interlock.GetDropOrTruncateBlockInfoFromJobs(jobs, gcSafePoint, dom, handleJobAndBlockInfo)
	}

	err = admin.IterAllDBSJobs(txn, fn)
	if err != nil {
		if terror.ErrorEqual(variable.ErrSnapshotTooOld, err) {
			// The err indicate that current dbs job and remain DBS jobs was been deleted by GC,
			// just ignore the error and return directly.
			return replicaInfos, nil
		}
		return nil, err
	}
	return replicaInfos, nil
}
   836  
// blockFlashReplicaStatus is the sync-progress report of one causet's TiFlash
// replica, as posted by TiFlash to the status endpoint.
type blockFlashReplicaStatus struct {
	// Modifying the field name needs to be negotiated with TiFlash defCausleagues.
	ID int64 `json:"id"`
	// RegionCount is the number of regions that need sync.
	RegionCount uint64 `json:"region_count"`
	// FlashRegionCount is the number of regions that already sync completed.
	FlashRegionCount uint64 `json:"flash_region_count"`
}
   845  
   846  // checkBlockFlashReplicaAvailable uses to check the available status of causet flash replica.
   847  func (tf *blockFlashReplicaStatus) checkBlockFlashReplicaAvailable() bool {
   848  	return tf.FlashRegionCount == tf.RegionCount
   849  }
   850  
   851  func (h flashReplicaHandler) handleStatusReport(w http.ResponseWriter, req *http.Request) {
   852  	var status blockFlashReplicaStatus
   853  	err := json.NewCausetDecoder(req.Body).Decode(&status)
   854  	if err != nil {
   855  		writeError(w, err)
   856  		return
   857  	}
   858  	do, err := stochastik.GetPetri(h.CausetStore.(ekv.CausetStorage))
   859  	if err != nil {
   860  		writeError(w, err)
   861  		return
   862  	}
   863  	s, err := stochastik.CreateStochastik(h.CausetStore.(ekv.CausetStorage))
   864  	if err != nil {
   865  		writeError(w, err)
   866  		return
   867  	}
   868  	available := status.checkBlockFlashReplicaAvailable()
   869  	err = do.DBS().UFIDelateBlockReplicaInfo(s, status.ID, available)
   870  	if err != nil {
   871  		writeError(w, err)
   872  	}
   873  	if available {
   874  		err = infosync.DeleteTiFlashBlockSyncProgress(status.ID)
   875  	} else {
   876  		err = infosync.UFIDelateTiFlashBlockSyncProgress(context.Background(), status.ID, float64(status.FlashRegionCount)/float64(status.RegionCount))
   877  	}
   878  	if err != nil {
   879  		writeError(w, err)
   880  	}
   881  
   882  	logutil.BgLogger().Info("handle flash replica report", zap.Int64("causet ID", status.ID), zap.Uint64("region count",
   883  		status.RegionCount),
   884  		zap.Uint64("flash region count", status.FlashRegionCount),
   885  		zap.Error(err))
   886  }
   887  
   888  // ServeHTTP handles request of list a database or causet's schemas.
   889  func (h schemaHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
   890  	schemaReplicant, err := h.schemaReplicant()
   891  	if err != nil {
   892  		writeError(w, err)
   893  		return
   894  	}
   895  
   896  	// parse params
   897  	params := mux.Vars(req)
   898  
   899  	if dbName, ok := params[FIDelBName]; ok {
   900  		cDBName := perceptron.NewCIStr(dbName)
   901  		if blockName, ok := params[pBlockName]; ok {
   902  			// causet schemaReplicant of a specified causet name
   903  			cBlockName := perceptron.NewCIStr(blockName)
   904  			data, err := schemaReplicant.BlockByName(cDBName, cBlockName)
   905  			if err != nil {
   906  				writeError(w, err)
   907  				return
   908  			}
   909  			writeData(w, data.Meta())
   910  			return
   911  		}
   912  		// all causet schemas in a specified database
   913  		if schemaReplicant.SchemaExists(cDBName) {
   914  			tbs := schemaReplicant.SchemaBlocks(cDBName)
   915  			tbsInfo := make([]*perceptron.BlockInfo, len(tbs))
   916  			for i := range tbsInfo {
   917  				tbsInfo[i] = tbs[i].Meta()
   918  			}
   919  			writeData(w, tbsInfo)
   920  			return
   921  		}
   922  		writeError(w, schemareplicant.ErrDatabaseNotExists.GenWithStackByArgs(dbName))
   923  		return
   924  	}
   925  
   926  	if blockID := req.FormValue(qBlockID); len(blockID) > 0 {
   927  		// causet schemaReplicant of a specified blockID
   928  		tid, err := strconv.Atoi(blockID)
   929  		if err != nil {
   930  			writeError(w, err)
   931  			return
   932  		}
   933  		if tid < 0 {
   934  			writeError(w, schemareplicant.ErrBlockNotExists.GenWithStack("Block which ID = %s does not exist.", blockID))
   935  			return
   936  		}
   937  		if data, ok := schemaReplicant.BlockByID(int64(tid)); ok {
   938  			writeData(w, data.Meta())
   939  			return
   940  		}
   941  		writeError(w, schemareplicant.ErrBlockNotExists.GenWithStack("Block which ID = %s does not exist.", blockID))
   942  		return
   943  	}
   944  
   945  	// all databases' schemas
   946  	writeData(w, schemaReplicant.AllSchemas())
   947  }
   948  
   949  // ServeHTTP handles causet related requests, such as causet's region information, disk usage.
   950  func (h blockHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
   951  	// parse params
   952  	params := mux.Vars(req)
   953  	dbName := params[FIDelBName]
   954  	blockName := params[pBlockName]
   955  	schemaReplicant, err := h.schemaReplicant()
   956  	if err != nil {
   957  		writeError(w, err)
   958  		return
   959  	}
   960  
   961  	blockName, partitionName := extractBlockAndPartitionName(blockName)
   962  	blockVal, err := schemaReplicant.BlockByName(perceptron.NewCIStr(dbName), perceptron.NewCIStr(blockName))
   963  	if err != nil {
   964  		writeError(w, err)
   965  		return
   966  	}
   967  	switch h.op {
   968  	case opBlockRegions:
   969  		h.handleRegionRequest(schemaReplicant, blockVal, w, req)
   970  	case opBlockDiskUsage:
   971  		h.handleDiskUsageRequest(blockVal, w)
   972  	case opBlockScatter:
   973  		// supports partition causet, only get one physical causet, prevent too many scatter schedulers.
   974  		ptbl, err := h.getPartition(blockVal, partitionName)
   975  		if err != nil {
   976  			writeError(w, err)
   977  			return
   978  		}
   979  		h.handleScatterBlockRequest(schemaReplicant, ptbl, w, req)
   980  	case opStopBlockScatter:
   981  		ptbl, err := h.getPartition(blockVal, partitionName)
   982  		if err != nil {
   983  			writeError(w, err)
   984  			return
   985  		}
   986  		h.handleStopScatterBlockRequest(schemaReplicant, ptbl, w, req)
   987  	default:
   988  		writeError(w, errors.New("method not found"))
   989  	}
   990  }
   991  
   992  // ServeHTTP handles request of dbs jobs history.
   993  func (h dbsHistoryJobHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
   994  	if limitID := req.FormValue(qLimit); len(limitID) > 0 {
   995  		lid, err := strconv.Atoi(limitID)
   996  
   997  		if err != nil {
   998  			writeError(w, err)
   999  			return
  1000  		}
  1001  
  1002  		if lid < 1 {
  1003  			writeError(w, errors.New("dbs history limit must be greater than 1"))
  1004  			return
  1005  		}
  1006  
  1007  		jobs, err := h.getAllHistoryDBS()
  1008  		if err != nil {
  1009  			writeError(w, errors.New("dbs history not found"))
  1010  			return
  1011  		}
  1012  
  1013  		jobsLen := len(jobs)
  1014  		if jobsLen > lid {
  1015  			start := jobsLen - lid
  1016  			jobs = jobs[start:]
  1017  		}
  1018  
  1019  		writeData(w, jobs)
  1020  		return
  1021  	}
  1022  	jobs, err := h.getAllHistoryDBS()
  1023  	if err != nil {
  1024  		writeError(w, errors.New("dbs history not found"))
  1025  		return
  1026  	}
  1027  	writeData(w, jobs)
  1028  }
  1029  
  1030  func (h dbsHistoryJobHandler) getAllHistoryDBS() ([]*perceptron.Job, error) {
  1031  	s, err := stochastik.CreateStochastik(h.CausetStore.(ekv.CausetStorage))
  1032  	if err != nil {
  1033  		return nil, errors.Trace(err)
  1034  	}
  1035  
  1036  	if s != nil {
  1037  		defer s.Close()
  1038  	}
  1039  
  1040  	causetstore := petri.GetPetri(s.(stochastikctx.Context)).CausetStore()
  1041  	txn, err := causetstore.Begin()
  1042  
  1043  	if err != nil {
  1044  		return nil, errors.Trace(err)
  1045  	}
  1046  	txnMeta := spacetime.NewMeta(txn)
  1047  
  1048  	jobs, err := txnMeta.GetAllHistoryDBSJobs()
  1049  	if err != nil {
  1050  		return nil, errors.Trace(err)
  1051  	}
  1052  	return jobs, nil
  1053  }
  1054  
  1055  func (h dbsResignTenantHandler) resignDBSTenant() error {
  1056  	dom, err := stochastik.GetPetri(h.causetstore)
  1057  	if err != nil {
  1058  		return errors.Trace(err)
  1059  	}
  1060  
  1061  	tenantMgr := dom.DBS().TenantManager()
  1062  	err = tenantMgr.ResignTenant(context.Background())
  1063  	if err != nil {
  1064  		return errors.Trace(err)
  1065  	}
  1066  	return nil
  1067  }
  1068  
  1069  // ServeHTTP handles request of resigning dbs tenant.
  1070  func (h dbsResignTenantHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
  1071  	if req.Method != http.MethodPost {
  1072  		writeError(w, errors.Errorf("This api only support POST method."))
  1073  		return
  1074  	}
  1075  
  1076  	err := h.resignDBSTenant()
  1077  	if err != nil {
  1078  		log.Error(err)
  1079  		writeError(w, err)
  1080  		return
  1081  	}
  1082  
  1083  	writeData(w, "success!")
  1084  }
  1085  
  1086  func (h blockHandler) getFIDelAddr() ([]string, error) {
  1087  	etcd, ok := h.CausetStore.(einsteindb.EtcdBackend)
  1088  	if !ok {
  1089  		return nil, errors.New("not implemented")
  1090  	}
  1091  	FIDelAddrs, err := etcd.EtcdAddrs()
  1092  	if err != nil {
  1093  		return nil, err
  1094  	}
  1095  	if len(FIDelAddrs) == 0 {
  1096  		return nil, errors.New("fidel unavailable")
  1097  	}
  1098  	return FIDelAddrs, nil
  1099  }
  1100  
  1101  func (h blockHandler) addScatterSchedule(startKey, endKey []byte, name string) error {
  1102  	FIDelAddrs, err := h.getFIDelAddr()
  1103  	if err != nil {
  1104  		return err
  1105  	}
  1106  	input := map[string]string{
  1107  		"name":       "scatter-range",
  1108  		"start_key":  url.QueryEscape(string(startKey)),
  1109  		"end_key":    url.QueryEscape(string(endKey)),
  1110  		"range_name": name,
  1111  	}
  1112  	v, err := json.Marshal(input)
  1113  	if err != nil {
  1114  		return err
  1115  	}
  1116  	scheduleURL := fmt.Sprintf("%s://%s/fidel/api/v1/schedulers", soliton.InternalHTTPSchema(), FIDelAddrs[0])
  1117  	resp, err := soliton.InternalHTTPClient().Post(scheduleURL, "application/json", bytes.NewBuffer(v))
  1118  	if err != nil {
  1119  		return err
  1120  	}
  1121  	if err := resp.Body.Close(); err != nil {
  1122  		log.Error(err)
  1123  	}
  1124  	return nil
  1125  }
  1126  
  1127  func (h blockHandler) deleteScatterSchedule(name string) error {
  1128  	FIDelAddrs, err := h.getFIDelAddr()
  1129  	if err != nil {
  1130  		return err
  1131  	}
  1132  	scheduleURL := fmt.Sprintf("%s://%s/fidel/api/v1/schedulers/scatter-range-%s", soliton.InternalHTTPSchema(), FIDelAddrs[0], name)
  1133  	req, err := http.NewRequest(http.MethodDelete, scheduleURL, nil)
  1134  	if err != nil {
  1135  		return err
  1136  	}
  1137  	resp, err := soliton.InternalHTTPClient().Do(req)
  1138  	if err != nil {
  1139  		return err
  1140  	}
  1141  	if err := resp.Body.Close(); err != nil {
  1142  		log.Error(err)
  1143  	}
  1144  	return nil
  1145  }
  1146  
  1147  func (h blockHandler) handleScatterBlockRequest(schemaReplicant schemareplicant.SchemaReplicant, tbl causet.PhysicalBlock, w http.ResponseWriter, req *http.Request) {
  1148  	// for record
  1149  	blockID := tbl.GetPhysicalID()
  1150  	startKey, endKey := blockcodec.GetBlockHandleKeyRange(blockID)
  1151  	startKey = codec.EncodeBytes([]byte{}, startKey)
  1152  	endKey = codec.EncodeBytes([]byte{}, endKey)
  1153  	blockName := fmt.Sprintf("%s-%d", tbl.Meta().Name.String(), blockID)
  1154  	err := h.addScatterSchedule(startKey, endKey, blockName)
  1155  	if err != nil {
  1156  		writeError(w, errors.Annotate(err, "scatter record error"))
  1157  		return
  1158  	}
  1159  	// for indices
  1160  	for _, index := range tbl.Indices() {
  1161  		indexID := index.Meta().ID
  1162  		indexName := index.Meta().Name.String()
  1163  		startKey, endKey := blockcodec.GetBlockIndexKeyRange(blockID, indexID)
  1164  		startKey = codec.EncodeBytes([]byte{}, startKey)
  1165  		endKey = codec.EncodeBytes([]byte{}, endKey)
  1166  		name := blockName + "-" + indexName
  1167  		err := h.addScatterSchedule(startKey, endKey, name)
  1168  		if err != nil {
  1169  			writeError(w, errors.Annotatef(err, "scatter index(%s) error", name))
  1170  			return
  1171  		}
  1172  	}
  1173  	writeData(w, "success!")
  1174  }
  1175  
  1176  func (h blockHandler) handleStopScatterBlockRequest(schemaReplicant schemareplicant.SchemaReplicant, tbl causet.PhysicalBlock, w http.ResponseWriter, req *http.Request) {
  1177  	// for record
  1178  	blockName := fmt.Sprintf("%s-%d", tbl.Meta().Name.String(), tbl.GetPhysicalID())
  1179  	err := h.deleteScatterSchedule(blockName)
  1180  	if err != nil {
  1181  		writeError(w, errors.Annotate(err, "stop scatter record error"))
  1182  		return
  1183  	}
  1184  	// for indices
  1185  	for _, index := range tbl.Indices() {
  1186  		indexName := index.Meta().Name.String()
  1187  		name := blockName + "-" + indexName
  1188  		err := h.deleteScatterSchedule(name)
  1189  		if err != nil {
  1190  			writeError(w, errors.Annotatef(err, "delete scatter index(%s) error", name))
  1191  			return
  1192  		}
  1193  	}
  1194  	writeData(w, "success!")
  1195  }
  1196  
  1197  func (h blockHandler) handleRegionRequest(schemaReplicant schemareplicant.SchemaReplicant, tbl causet.Block, w http.ResponseWriter, req *http.Request) {
  1198  	pi := tbl.Meta().GetPartitionInfo()
  1199  	if pi != nil {
  1200  		// Partitioned causet.
  1201  		var data []*BlockRegions
  1202  		for _, def := range pi.Definitions {
  1203  			blockRegions, err := h.getRegionsByID(tbl, def.ID, def.Name.O)
  1204  			if err != nil {
  1205  				writeError(w, err)
  1206  				return
  1207  			}
  1208  
  1209  			data = append(data, blockRegions)
  1210  		}
  1211  		writeData(w, data)
  1212  		return
  1213  	}
  1214  
  1215  	spacetime := tbl.Meta()
  1216  	blockRegions, err := h.getRegionsByID(tbl, spacetime.ID, spacetime.Name.O)
  1217  	if err != nil {
  1218  		writeError(w, err)
  1219  		return
  1220  	}
  1221  
  1222  	writeData(w, blockRegions)
  1223  }
  1224  
// getRegionsByID builds a region report for one physical causet (or
// partition) identified by id: the regions covering its record (event data)
// key range plus, per index, the regions covering that index's key range.
func (h blockHandler) getRegionsByID(tbl causet.Block, id int64, name string) (*BlockRegions, error) {
	// for record
	startKey, endKey := blockcodec.GetBlockHandleKeyRange(id)
	ctx := context.Background()
	FIDelCli := h.RegionCache.FIDelClient()
	// limit -1: presumably "scan all regions in range" — TODO confirm
	// against the FIDel client's ScanRegions contract.
	regions, err := FIDelCli.ScanRegions(ctx, startKey, endKey, -1)
	if err != nil {
		return nil, err
	}

	recordRegions := make([]RegionMeta, 0, len(regions))
	for _, region := range regions {
		spacetime := RegionMeta{
			ID:          region.Meta.Id,
			Leader:      region.Leader,
			Peers:       region.Meta.Peers,
			RegionEpoch: region.Meta.RegionEpoch,
		}
		recordRegions = append(recordRegions, spacetime)
	}

	// for indices: same scan-and-convert as above, once per index range.
	indices := make([]IndexRegions, len(tbl.Indices()))
	for i, index := range tbl.Indices() {
		indexID := index.Meta().ID
		indices[i].Name = index.Meta().Name.String()
		indices[i].ID = indexID
		startKey, endKey := blockcodec.GetBlockIndexKeyRange(id, indexID)
		regions, err := FIDelCli.ScanRegions(ctx, startKey, endKey, -1)
		if err != nil {
			return nil, err
		}
		indexRegions := make([]RegionMeta, 0, len(regions))
		for _, region := range regions {
			spacetime := RegionMeta{
				ID:          region.Meta.Id,
				Leader:      region.Leader,
				Peers:       region.Meta.Peers,
				RegionEpoch: region.Meta.RegionEpoch,
			}
			indexRegions = append(indexRegions, spacetime)
		}
		indices[i].Regions = indexRegions
	}

	return &BlockRegions{
		BlockName:     name,
		BlockID:       id,
		Indices:       indices,
		RecordRegions: recordRegions,
	}, nil
}
  1277  
  1278  func (h blockHandler) handleDiskUsageRequest(tbl causet.Block, w http.ResponseWriter) {
  1279  	blockID := tbl.Meta().ID
  1280  	var stats helper.FIDelRegionStats
  1281  	err := h.GetFIDelRegionStats(blockID, &stats)
  1282  	if err != nil {
  1283  		writeError(w, err)
  1284  		return
  1285  	}
  1286  	writeData(w, stats.StorageSize)
  1287  }
  1288  
// hotRegion pairs a region's causet/index identity with its hotness metrics.
type hotRegion struct {
	helper.TblIndex
	helper.RegionMetric
}

// hotRegions implements sort.Interface (Len/Less/Swap below) to order hot
// regions by hotness.
type hotRegions []hotRegion
  1294  
// Len returns the number of defCauslected hot regions.
func (rs hotRegions) Len() int {
	return len(rs)
}
  1298  
  1299  func (rs hotRegions) Less(i, j int) bool {
  1300  	return rs[i].MaxHotDegree > rs[j].MaxHotDegree || (rs[i].MaxHotDegree == rs[j].MaxHotDegree && rs[i].FlowBytes > rs[j].FlowBytes)
  1301  }
  1302  
// Swap exchanges the elements at positions i and j.
func (rs hotRegions) Swap(i, j int) {
	rs[i], rs[j] = rs[j], rs[i]
}
  1306  
// ServeHTTP handles request of get region by ID. When no region ID path
// variable is present it instead serves two named routes: "RegionsMeta"
// (regions covering the spacetime key range) and "RegionHot" (read/write hot
// region statistics); any other route without an ID returns an empty body.
func (h regionHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// parse and check params
	params := mux.Vars(req)
	if _, ok := params[pRegionID]; !ok {
		router := mux.CurrentRoute(req).GetName()
		if router == "RegionsMeta" {
			// Spacetime data lives in the key range ['m', 'n').
			startKey := []byte{'m'}
			endKey := []byte{'n'}

			recordRegionIDs, err := h.RegionCache.ListRegionIDsInKeyRange(einsteindb.NewBackofferWithVars(context.Background(), 500, nil), startKey, endKey)
			if err != nil {
				writeError(w, err)
				return
			}

			recordRegions, err := h.getRegionsMeta(recordRegionIDs)
			if err != nil {
				writeError(w, err)
				return
			}
			writeData(w, recordRegions)
			return
		}
		if router == "RegionHot" {
			schemaReplicant, err := h.schemaReplicant()
			if err != nil {
				writeError(w, err)
				return
			}
			// Collect both read-hot and write-hot statistics across all schemas.
			hotRead, err := h.ScrapeHotInfo(FIDelapi.HotRead, schemaReplicant.AllSchemas())
			if err != nil {
				writeError(w, err)
				return
			}
			hotWrite, err := h.ScrapeHotInfo(FIDelapi.HotWrite, schemaReplicant.AllSchemas())
			if err != nil {
				writeError(w, err)
				return
			}
			writeData(w, map[string]interface{}{
				"write": hotWrite,
				"read":  hotRead,
			})
			return
		}
		// Unknown route without a region ID: nothing to report.
		return
	}

	// Base 0 lets the ID be given in decimal, hex (0x...), or octal form.
	regionIDInt, err := strconv.ParseInt(params[pRegionID], 0, 64)
	if err != nil {
		writeError(w, err)
		return
	}
	regionID := uint64(regionIDInt)

	// locate region
	region, err := h.RegionCache.LocateRegionByID(einsteindb.NewBackofferWithVars(context.Background(), 500, nil), regionID)
	if err != nil {
		writeError(w, err)
		return
	}

	frameRange, err := helper.NewRegionFrameRange(region)
	if err != nil {
		writeError(w, err)
		return
	}

	// create RegionDetail from RegionFrameRange
	regionDetail := &RegionDetail{
		RegionID: regionID,
		StartKey: region.StartKey,
		EndKey:   region.EndKey,
	}
	schemaReplicant, err := h.schemaReplicant()
	if err != nil {
		writeError(w, err)
		return
	}
	// Since we need a database's name for each frame, and a causet's database name can not
	// get from causet's ID directly. Above all, here do dot process like
	// 		`for id in [frameRange.firstBlockID,frameRange.endBlockID]`
	// on [frameRange.firstBlockID,frameRange.endBlockID] is small enough.
	for _, EDB := range schemaReplicant.AllSchemas() {
		if soliton.IsMemDB(EDB.Name.L) {
			continue
		}
		for _, blockVal := range EDB.Blocks {
			regionDetail.addBlockInRange(EDB.Name.String(), blockVal, frameRange)
		}
	}
	writeData(w, regionDetail)
}
  1401  
  1402  // parseQuery is used to parse query string in URL with shouldUnescape, due to golang http package can not distinguish
  1403  // query like "?a=" and "?a". We rewrite it to separate these two queries. e.g.
  1404  // "?a=" which means that a is an empty string "";
  1405  // "?a"  which means that a is null.
  1406  // If shouldUnescape is true, we use QueryUnescape to handle keys and values that will be put in m.
  1407  // If shouldUnescape is false, we don't use QueryUnescap to handle.
  1408  func parseQuery(query string, m url.Values, shouldUnescape bool) error {
  1409  	var err error
  1410  	for query != "" {
  1411  		key := query
  1412  		if i := strings.IndexAny(key, "&;"); i >= 0 {
  1413  			key, query = key[:i], key[i+1:]
  1414  		} else {
  1415  			query = ""
  1416  		}
  1417  		if key == "" {
  1418  			continue
  1419  		}
  1420  		if i := strings.Index(key, "="); i >= 0 {
  1421  			value := ""
  1422  			key, value = key[:i], key[i+1:]
  1423  			if shouldUnescape {
  1424  				key, err = url.QueryUnescape(key)
  1425  				if err != nil {
  1426  					return errors.Trace(err)
  1427  				}
  1428  				value, err = url.QueryUnescape(value)
  1429  				if err != nil {
  1430  					return errors.Trace(err)
  1431  				}
  1432  			}
  1433  			m[key] = append(m[key], value)
  1434  		} else {
  1435  			if shouldUnescape {
  1436  				key, err = url.QueryUnescape(key)
  1437  				if err != nil {
  1438  					return errors.Trace(err)
  1439  				}
  1440  			}
  1441  			if _, ok := m[key]; !ok {
  1442  				m[key] = nil
  1443  			}
  1444  		}
  1445  	}
  1446  	return errors.Trace(err)
  1447  }
  1448  
  1449  // ServeHTTP handles request of list a causet's regions.
  1450  func (h mvccTxnHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
  1451  	var data interface{}
  1452  	params := mux.Vars(req)
  1453  	var err error
  1454  	switch h.op {
  1455  	case opMvccGetByHex:
  1456  		data, err = h.handleMvccGetByHex(params)
  1457  	case opMvccGetByIdx:
  1458  		if req.URL == nil {
  1459  			err = errors.BadRequestf("Invalid URL")
  1460  			break
  1461  		}
  1462  		values := make(url.Values)
  1463  		err = parseQuery(req.URL.RawQuery, values, true)
  1464  		if err == nil {
  1465  			data, err = h.handleMvccGetByIdx(params, values)
  1466  		}
  1467  	case opMvccGetByKey:
  1468  		decode := len(req.URL.Query().Get("decode")) > 0
  1469  		data, err = h.handleMvccGetByKey(params, decode)
  1470  	case opMvccGetByTxn:
  1471  		data, err = h.handleMvccGetByTxn(params)
  1472  	default:
  1473  		err = errors.NotSupportedf("Operation not supported.")
  1474  	}
  1475  	if err != nil {
  1476  		writeError(w, err)
  1477  	} else {
  1478  		writeData(w, data)
  1479  	}
  1480  }
  1481  
  1482  func extractBlockAndPartitionName(str string) (string, string) {
  1483  	// extract causet name and partition name from this "causet(partition)":
  1484  	// A sane person would not let the the causet name or partition name contain '('.
  1485  	start := strings.IndexByte(str, '(')
  1486  	if start == -1 {
  1487  		return str, ""
  1488  	}
  1489  	end := strings.IndexByte(str, ')')
  1490  	if end == -1 {
  1491  		return str, ""
  1492  	}
  1493  	return str[:start], str[start+1 : end]
  1494  }
  1495  
  1496  // handleMvccGetByIdx gets MVCC info by an index key.
  1497  func (h mvccTxnHandler) handleMvccGetByIdx(params map[string]string, values url.Values) (interface{}, error) {
  1498  	dbName := params[FIDelBName]
  1499  	blockName := params[pBlockName]
  1500  	handleStr := params[pHandle]
  1501  
  1502  	t, err := h.getBlock(dbName, blockName)
  1503  	if err != nil {
  1504  		return nil, errors.Trace(err)
  1505  	}
  1506  
  1507  	var idxDefCauss []*perceptron.DeferredCausetInfo
  1508  	var idx causet.Index
  1509  	for _, v := range t.Indices() {
  1510  		if strings.EqualFold(v.Meta().Name.String(), params[pIndexName]) {
  1511  			for _, c := range v.Meta().DeferredCausets {
  1512  				idxDefCauss = append(idxDefCauss, t.Meta().DeferredCausets[c.Offset])
  1513  			}
  1514  			idx = v
  1515  			break
  1516  		}
  1517  	}
  1518  	if idx == nil {
  1519  		return nil, errors.NotFoundf("Index %s not found!", params[pIndexName])
  1520  	}
  1521  	return h.getMvccByIdxValue(idx, values, idxDefCauss, handleStr)
  1522  }
  1523  
// handleMvccGetByKey gets MVCC info of a record by its integer handle. When
// decodeData is true, short values carried in the MVCC write/value entries
// are additionally decoded into per-transaction defcausumn/value maps.
func (h mvccTxnHandler) handleMvccGetByKey(params map[string]string, decodeData bool) (interface{}, error) {
	handle, err := strconv.ParseInt(params[pHandle], 0, 64)
	if err != nil {
		return nil, errors.Trace(err)
	}

	tb, err := h.getBlock(params[FIDelBName], params[pBlockName])
	if err != nil {
		return nil, errors.Trace(err)
	}
	resp, err := h.getMvccByHandle(tb.GetPhysicalID(), handle)
	if err != nil {
		return nil, err
	}
	if !decodeData {
		return resp, nil
	}
	// Map defcausumn ID -> field type, needed to decode event values below.
	defCausMap := make(map[int64]*types.FieldType, 3)
	for _, defCaus := range tb.Meta().DeferredCausets {
		defCausMap[defCaus.ID] = &defCaus.FieldType
	}

	respValue := resp.Value
	var result interface{} = resp
	if respValue.Info != nil {
		// Decoded rows keyed by the writing transaction's start timestamp.
		quantum := make(map[string][]map[string]string)
		for _, w := range respValue.Info.Writes {
			if len(w.ShortValue) > 0 {
				quantum[strconv.FormatUint(w.StartTs, 10)], err = h.decodeMvccData(w.ShortValue, defCausMap, tb.Meta())
			}
		}

		for _, v := range respValue.Info.Values {
			if len(v.Value) > 0 {
				quantum[strconv.FormatUint(v.StartTs, 10)], err = h.decodeMvccData(v.Value, defCausMap, tb.Meta())
			}
		}

		if len(quantum) > 0 {
			re := map[string]interface{}{
				"key":  resp.Key,
				"info": respValue.Info,
				"data": quantum,
			}
			// NOTE(review): only the last decode error from the loops above
			// survives here; it is surfaced in the payload rather than
			// failing the whole request — confirm this is intended.
			if err != nil {
				re["decode_error"] = err.Error()
			}
			result = re
		}
	}

	return result, nil
}
  1577  
// decodeMvccData decodes an encoded event value into a list of one-entry
// {defcausumn name: string value} maps, one per defcausumn present in the
// decoded event. Partially decoded records are returned together with the
// error, which holds the last decode/ToString failure (if any).
func (h mvccTxnHandler) decodeMvccData(bs []byte, defCausMap map[int64]*types.FieldType, tb *perceptron.BlockInfo) ([]map[string]string, error) {
	rs, err := blockcodec.DecodeRowToCausetMap(bs, defCausMap, time.UTC)
	var record []map[string]string
	for _, defCaus := range tb.DeferredCausets {
		if c, ok := rs[defCaus.ID]; ok {
			// "nil" is the textual marker for a NULL value. Note that a
			// ToString failure here overwrites any earlier err value.
			data := "nil"
			if !c.IsNull() {
				data, err = c.ToString()
			}
			record = append(record, map[string]string{defCaus.Name.O: data})
		}
	}
	return record, err
}
  1592  
  1593  func (h *mvccTxnHandler) handleMvccGetByTxn(params map[string]string) (interface{}, error) {
  1594  	startTS, err := strconv.ParseInt(params[pStartTS], 0, 64)
  1595  	if err != nil {
  1596  		return nil, errors.Trace(err)
  1597  	}
  1598  	blockID, err := h.getBlockID(params[FIDelBName], params[pBlockName])
  1599  	if err != nil {
  1600  		return nil, errors.Trace(err)
  1601  	}
  1602  	startKey := blockcodec.EncodeBlockPrefix(blockID)
  1603  	endKey := blockcodec.EncodeRowKeyWithHandle(blockID, ekv.IntHandle(math.MaxInt64))
  1604  	return h.getMvccByStartTs(uint64(startTS), startKey, endKey)
  1605  }
  1606  
// serverInfo is used to report the servers info when do http request.
type serverInfo struct {
	// IsTenant reports whether this server currently holds DBS tenantship.
	IsTenant bool `json:"is_tenant"`
	*infosync.ServerInfo
}
  1612  
  1613  // ServeHTTP handles request of dbs server info.
  1614  func (h serverInfoHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
  1615  	do, err := stochastik.GetPetri(h.CausetStore.(ekv.CausetStorage))
  1616  	if err != nil {
  1617  		writeError(w, errors.New("create stochastik error"))
  1618  		log.Error(err)
  1619  		return
  1620  	}
  1621  	info := serverInfo{}
  1622  	info.ServerInfo, err = infosync.GetServerInfo()
  1623  	if err != nil {
  1624  		writeError(w, err)
  1625  		log.Error(err)
  1626  		return
  1627  	}
  1628  	info.IsTenant = do.DBS().TenantManager().IsTenant()
  1629  	writeData(w, info)
  1630  }
  1631  
// clusterServerInfo is used to report cluster servers info when do http request.
type clusterServerInfo struct {
	// ServersNum is the total number of servers in the cluster.
	ServersNum int `json:"servers_num,omitempty"`
	// TenantID identifies the server currently holding DBS tenantship.
	TenantID string `json:"tenant_id"`
	// IsAllServerVersionConsistent is true when every server runs the same version.
	IsAllServerVersionConsistent bool `json:"is_all_server_version_consistent,omitempty"`
	// AllServersDiffVersions lists the distinct versions when they are inconsistent.
	AllServersDiffVersions []infosync.ServerVersionInfo `json:"all_servers_diff_versions,omitempty"`
	// AllServersInfo maps server ID to its full info record.
	AllServersInfo map[string]*infosync.ServerInfo `json:"all_servers_info,omitempty"`
}
  1640  
// ServeHTTP handles request of all dbs servers info. It reports every server
// in the cluster, the current DBS tenant, and whether all server versions are
// consistent.
func (h allServerInfoHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	do, err := stochastik.GetPetri(h.CausetStore.(ekv.CausetStorage))
	if err != nil {
		writeError(w, errors.New("create stochastik error"))
		log.Error(err)
		return
	}
	ctx := context.Background()
	allServersInfo, err := infosync.GetAllServerInfo(ctx)
	if err != nil {
		writeError(w, errors.New("dbs server information not found"))
		log.Error(err)
		return
	}
	// Bound the tenant-ID lookup with a 3-second timeout.
	ctx, cancel := context.WithTimeout(ctx, 3*time.Second)
	tenantID, err := do.DBS().TenantManager().GetTenantID(ctx)
	cancel()
	if err != nil {
		writeError(w, errors.New("dbs server information not found"))
		log.Error(err)
		return
	}
	// Deduplicate version info to detect version skew across the cluster.
	allVersionsMap := map[infosync.ServerVersionInfo]struct{}{}
	allVersions := make([]infosync.ServerVersionInfo, 0, len(allServersInfo))
	for _, v := range allServersInfo {
		if _, ok := allVersionsMap[v.ServerVersionInfo]; ok {
			continue
		}
		allVersionsMap[v.ServerVersionInfo] = struct{}{}
		allVersions = append(allVersions, v.ServerVersionInfo)
	}
	clusterInfo := clusterServerInfo{
		ServersNum: len(allServersInfo),
		TenantID:   tenantID,
		// len(allVersions) = 1 indicates there has only 1 milevadb version in cluster, so all server versions are consistent.
		IsAllServerVersionConsistent: len(allVersions) == 1,
		AllServersInfo:               allServersInfo,
	}
	// if IsAllServerVersionConsistent is false, return the all milevadb servers version.
	if !clusterInfo.IsAllServerVersionConsistent {
		clusterInfo.AllServersDiffVersions = allVersions
	}
	writeData(w, clusterInfo)
}
  1686  
// dbBlockInfo is used to report the database, causet information and the current schemaReplicant version.
type dbBlockInfo struct {
	// DBInfo is the database containing the causet.
	DBInfo *perceptron.DBInfo `json:"db_info"`
	// BlockInfo is the causet's spacetime.
	BlockInfo *perceptron.BlockInfo `json:"block_info"`
	// SchemaVersion is the schemaReplicant version at lookup time.
	SchemaVersion int64 `json:"schema_version"`
}
  1693  
  1694  // ServeHTTP handles request of database information and causet information by blockID.
  1695  func (h dbBlockHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
  1696  	params := mux.Vars(req)
  1697  	blockID := params[pBlockID]
  1698  	physicalID, err := strconv.Atoi(blockID)
  1699  	if err != nil {
  1700  		writeError(w, errors.Errorf("Wrong blockID: %v", blockID))
  1701  		return
  1702  	}
  1703  
  1704  	schemaReplicant, err := h.schemaReplicant()
  1705  	if err != nil {
  1706  		writeError(w, err)
  1707  		return
  1708  	}
  1709  
  1710  	dbTblInfo := dbBlockInfo{
  1711  		SchemaVersion: schemaReplicant.SchemaMetaVersion(),
  1712  	}
  1713  	tbl, ok := schemaReplicant.BlockByID(int64(physicalID))
  1714  	if ok {
  1715  		dbTblInfo.BlockInfo = tbl.Meta()
  1716  		dbInfo, ok := schemaReplicant.SchemaByBlock(dbTblInfo.BlockInfo)
  1717  		if !ok {
  1718  			logutil.BgLogger().Error("can not find the database of the causet", zap.Int64("causet id", dbTblInfo.BlockInfo.ID), zap.String("causet name", dbTblInfo.BlockInfo.Name.L))
  1719  			writeError(w, schemareplicant.ErrBlockNotExists.GenWithStack("Block which ID = %s does not exist.", blockID))
  1720  			return
  1721  		}
  1722  		dbTblInfo.DBInfo = dbInfo
  1723  		writeData(w, dbTblInfo)
  1724  		return
  1725  	}
  1726  	// The physicalID maybe a partition ID of the partition-causet.
  1727  	tbl, dbInfo := schemaReplicant.FindBlockByPartitionID(int64(physicalID))
  1728  	if tbl == nil {
  1729  		writeError(w, schemareplicant.ErrBlockNotExists.GenWithStack("Block which ID = %s does not exist.", blockID))
  1730  		return
  1731  	}
  1732  	dbTblInfo.BlockInfo = tbl.Meta()
  1733  	dbTblInfo.DBInfo = dbInfo
  1734  	writeData(w, dbTblInfo)
  1735  }
  1736  
  1737  // ServeHTTP handles request of MilevaDB metric profile.
  1738  func (h profileHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
  1739  	sctx, err := stochastik.CreateStochastik(h.CausetStore)
  1740  	if err != nil {
  1741  		writeError(w, err)
  1742  		return
  1743  	}
  1744  	var start, end time.Time
  1745  	if req.FormValue("end") != "" {
  1746  		end, err = time.ParseInLocation(time.RFC3339, req.FormValue("end"), sctx.GetStochastikVars().Location())
  1747  		if err != nil {
  1748  			writeError(w, err)
  1749  			return
  1750  		}
  1751  	} else {
  1752  		end = time.Now()
  1753  	}
  1754  	if req.FormValue("start") != "" {
  1755  		start, err = time.ParseInLocation(time.RFC3339, req.FormValue("start"), sctx.GetStochastikVars().Location())
  1756  		if err != nil {
  1757  			writeError(w, err)
  1758  			return
  1759  		}
  1760  	} else {
  1761  		start = end.Add(-time.Minute * 10)
  1762  	}
  1763  	valueTp := req.FormValue("type")
  1764  	pb, err := interlock.NewProfileBuilder(sctx, start, end, valueTp)
  1765  	if err != nil {
  1766  		writeError(w, err)
  1767  		return
  1768  	}
  1769  	err = pb.DefCauslect()
  1770  	if err != nil {
  1771  		writeError(w, err)
  1772  		return
  1773  	}
  1774  	_, err = w.Write(pb.Build())
  1775  	terror.Log(errors.Trace(err))
  1776  }
  1777  
// testHandler is the handler for tests. It's convenient to provide some APIs for integration tests.
type testHandler struct {
	*einsteindbHandlerTool
	// gcIsRunning is flipped atomically (CAS 0->1) to reject concurrent GC
	// requests; it is reset to 0 when the operation finishes.
	gcIsRunning uint32
}
  1783  
  1784  // ServeHTTP handles test related requests.
  1785  func (h *testHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
  1786  	params := mux.Vars(req)
  1787  	mod := strings.ToLower(params["mod"])
  1788  	op := strings.ToLower(params["op"])
  1789  
  1790  	switch mod {
  1791  	case "gc":
  1792  		h.handleGC(op, w, req)
  1793  	default:
  1794  		writeError(w, errors.NotSupportedf("module(%s)", mod))
  1795  	}
  1796  }
  1797  
  1798  // Supported operations:
  1799  //   * resolvelock?safepoint={uint64}&physical={bool}:
  1800  //	   * safepoint: resolve all locks whose timestamp is less than the safepoint.
  1801  //	   * physical: whether it uses physical(green GC) mode to scan locks. Default is true.
  1802  func (h *testHandler) handleGC(op string, w http.ResponseWriter, req *http.Request) {
  1803  	if !atomic.CompareAndSwapUint32(&h.gcIsRunning, 0, 1) {
  1804  		writeError(w, errors.New("GC is running"))
  1805  		return
  1806  	}
  1807  	defer atomic.StoreUint32(&h.gcIsRunning, 0)
  1808  
  1809  	switch op {
  1810  	case "resolvelock":
  1811  		h.handleGCResolveLocks(w, req)
  1812  	default:
  1813  		writeError(w, errors.NotSupportedf("operation(%s)", op))
  1814  	}
  1815  }
  1816  
  1817  func (h *testHandler) handleGCResolveLocks(w http.ResponseWriter, req *http.Request) {
  1818  	s := req.FormValue("safepoint")
  1819  	safePoint, err := strconv.ParseUint(s, 10, 64)
  1820  	if err != nil {
  1821  		writeError(w, errors.Errorf("parse safePoint(%s) failed", s))
  1822  		return
  1823  	}
  1824  	usePhysical := true
  1825  	s = req.FormValue("physical")
  1826  	if s != "" {
  1827  		usePhysical, err = strconv.ParseBool(s)
  1828  		if err != nil {
  1829  			writeError(w, errors.Errorf("parse physical(%s) failed", s))
  1830  			return
  1831  		}
  1832  	}
  1833  
  1834  	ctx := req.Context()
  1835  	logutil.Logger(ctx).Info("start resolving locks", zap.Uint64("safePoint", safePoint), zap.Bool("physical", usePhysical))
  1836  	physicalUsed, err := gcworker.RunResolveLocks(ctx, h.CausetStore, h.RegionCache.FIDelClient(), safePoint, "testGCWorker", 3, usePhysical)
  1837  	if err != nil {
  1838  		writeError(w, errors.Annotate(err, "resolveLocks failed"))
  1839  	} else {
  1840  		writeData(w, map[string]interface{}{
  1841  			"physicalUsed": physicalUsed,
  1842  		})
  1843  	}
  1844  }