github.com/matrixorigin/matrixone@v1.2.0/pkg/cnservice/server.go

// Copyright 2021 - 2022 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cnservice

import (
	"context"
	"encoding/hex"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/google/uuid"
	"go.uber.org/zap"

	"github.com/matrixorigin/matrixone/pkg/catalog"
	v2 "github.com/matrixorigin/matrixone/pkg/util/metric/v2"
	"github.com/matrixorigin/matrixone/pkg/util/profile"

	"github.com/fagongzi/goetty/v2"

	"github.com/matrixorigin/matrixone/pkg/bootstrap"
	"github.com/matrixorigin/matrixone/pkg/clusterservice"
	"github.com/matrixorigin/matrixone/pkg/cnservice/cnclient"
	"github.com/matrixorigin/matrixone/pkg/common/moerr"
	"github.com/matrixorigin/matrixone/pkg/common/morpc"
	"github.com/matrixorigin/matrixone/pkg/common/mpool"
	"github.com/matrixorigin/matrixone/pkg/common/runtime"
	"github.com/matrixorigin/matrixone/pkg/common/stopper"
	"github.com/matrixorigin/matrixone/pkg/config"
	"github.com/matrixorigin/matrixone/pkg/defines"
	"github.com/matrixorigin/matrixone/pkg/fileservice"
	"github.com/matrixorigin/matrixone/pkg/frontend"
	"github.com/matrixorigin/matrixone/pkg/gossip"
	"github.com/matrixorigin/matrixone/pkg/incrservice"
	"github.com/matrixorigin/matrixone/pkg/lockservice"
	"github.com/matrixorigin/matrixone/pkg/logservice"
	"github.com/matrixorigin/matrixone/pkg/logutil"
	"github.com/matrixorigin/matrixone/pkg/pb/metadata"
	"github.com/matrixorigin/matrixone/pkg/pb/pipeline"
	"github.com/matrixorigin/matrixone/pkg/pb/txn"
	"github.com/matrixorigin/matrixone/pkg/queryservice"
	qclient "github.com/matrixorigin/matrixone/pkg/queryservice/client"
	"github.com/matrixorigin/matrixone/pkg/sql/compile"
	"github.com/matrixorigin/matrixone/pkg/txn/client"
	"github.com/matrixorigin/matrixone/pkg/txn/rpc"
	"github.com/matrixorigin/matrixone/pkg/txn/storage/memorystorage"
	"github.com/matrixorigin/matrixone/pkg/txn/trace"
	"github.com/matrixorigin/matrixone/pkg/udf"
	"github.com/matrixorigin/matrixone/pkg/udf/pythonservice"
	"github.com/matrixorigin/matrixone/pkg/util/address"
	"github.com/matrixorigin/matrixone/pkg/util/executor"
	"github.com/matrixorigin/matrixone/pkg/util/status"
	"github.com/matrixorigin/matrixone/pkg/vm/engine"
	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/blockio"
)

// NewService builds a CN service instance: it validates the configuration,
// resolves the file services, connects to the HAKeeper, and wires up the
// frontend MO server and the pipeline RPC server.
func NewService(
	cfg *Config,
	ctx context.Context,
	fileService fileservice.FileService,
	gossipNode *gossip.Node,
	options ...Option,
) (Service, error) {
	if err := cfg.Validate(); err != nil {
		return nil, err
	}

	// set frontend parameters
	cfg.Frontend.SetDefaultValues()
	cfg.Frontend.SetMaxMessageSize(uint64(cfg.RPC.MaxMessageSize))

	configKVMap, _ := dumpCnConfig(*cfg)
	options = append(options, WithConfigData(configKVMap))
	options = append(options, WithBootstrapOptions(bootstrap.WithUpgradeTenantBatch(cfg.UpgradeTenantBatchSize)))
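	// The two options appended above join the caller-provided options and are
	// applied to the service, together with any other Option values, further
	// below.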

	// get metadata fs
	metadataFS, err := fileservice.Get[fileservice.ReplaceableFileService](fileService, defines.LocalFileServiceName)
	if err != nil {
		return nil, err
	}
	// get etl fs
	etlFS, err := fileservice.Get[fileservice.FileService](fileService, defines.ETLFileServiceName)
	if err != nil {
		return nil, err
	}

	srv := &service{
		metadata: metadata.CNStore{
			UUID: cfg.UUID,
			Role: metadata.MustParseCNRole(cfg.Role),
		},
		cfg:         cfg,
		logger:      logutil.GetGlobalLogger().Named("cn-service"),
		metadataFS:  metadataFS,
		etlFS:       etlFS,
		fileService: fileService,
		sessionMgr:  queryservice.NewSessionManager(),
		addressMgr:  address.NewAddressManager(cfg.ServiceHost, cfg.PortBase),
		gossipNode:  gossipNode,
	}

	srv.requestHandler = func(ctx context.Context,
		cnAddr string,
		message morpc.Message,
		cs morpc.ClientSession,
		engine engine.Engine,
		fService fileservice.FileService,
		lockService lockservice.LockService,
		queryClient qclient.QueryClient,
		hakeeper logservice.CNHAKeeperClient,
		udfService udf.Service,
		cli client.TxnClient,
		aicm *defines.AutoIncrCacheManager,
		messageAcquirer func() morpc.Message) error {
		return nil
	}

	for _, opt := range options {
		opt(srv)
	}
	srv.stopper = stopper.NewStopper("cn-service", stopper.WithLogger(srv.logger))

	srv.registerServices()
	if _, err = srv.getHAKeeperClient(); err != nil {
		return nil, err
	}
	if err := srv.initQueryService(); err != nil {
		return nil, err
	}

	srv.stopper = stopper.NewStopper("cn-service", stopper.WithLogger(srv.logger))

	if err := srv.initMetadata(); err != nil {
		return nil, err
	}

	srv.responsePool = &sync.Pool{
		New: func() any {
			return &pipeline.Message{}
		},
	}

	pu := config.NewParameterUnit(
		&cfg.Frontend,
		nil,
		nil,
		engine.Nodes{engine.Node{
			Addr: srv.pipelineServiceServiceAddr(),
		}})
	pu.HAKeeperClient = srv._hakeeperClient
	frontend.InitServerVersion(pu.SV.MoVersion)

	// Init the autoIncrCacheManager after the default values are set and before the MO server is initialized.
	srv.aicm = &defines.AutoIncrCacheManager{
		AutoIncrCaches: make(map[string]defines.AutoIncrCache),
		Mu:             &sync.Mutex{},
		MaxSize:        pu.SV.AutoIncrCacheSize,
	}

	// init UdfService
	var udfServices []udf.Service
	// add python client to handle python udf
	if srv.cfg.PythonUdfClient.ServerAddress != "" {
		pc, err := pythonservice.NewClient(srv.cfg.PythonUdfClient)
		if err != nil {
			panic(err)
		}
		udfServices = append(udfServices, pc)
	}
	srv.udfService, err = udf.NewService(udfServices...)
	if err != nil {
		panic(err)
	}

	srv.pu = pu
	srv.pu.LockService = srv.lockService
	srv.pu.HAKeeperClient = srv._hakeeperClient
	srv.pu.QueryClient = srv.queryClient
	srv.pu.UdfService = srv.udfService
	srv._txnClient = pu.TxnClient

	if err = srv.initMOServer(ctx, pu, srv.aicm); err != nil {
		return nil, err
	}

	server, err := morpc.NewRPCServer("pipeline-server", srv.pipelineServiceListenAddr(),
		morpc.NewMessageCodec(srv.acquireMessage,
			morpc.WithCodecMaxBodySize(int(cfg.RPC.MaxMessageSize))),
		morpc.WithServerLogger(srv.logger),
		morpc.WithServerGoettyOptions(
			goetty.WithSessionRWBUfferSize(cfg.ReadBufferSize, cfg.WriteBufferSize),
			goetty.WithSessionReleaseMsgFunc(func(v any) {
				m := v.(morpc.RPCMessage)
				if !m.InternalMessage() {
					srv.releaseMessage(m.Message.(*pipeline.Message))
				}
			}),
		),
		morpc.WithServerDisableAutoCancelContext())
	if err != nil {
		return nil, err
	}
	server.RegisterRequestHandler(srv.handleRequest)
	srv.server = server
	srv.storeEngine = pu.StorageEngine

	srv.requestHandler = func(ctx context.Context,
		cnAddr string,
		message morpc.Message,
		cs morpc.ClientSession,
		engine engine.Engine,
		fService fileservice.FileService,
		lockService lockservice.LockService,
		queryClient qclient.QueryClient,
		hakeeper logservice.CNHAKeeperClient,
		udfService udf.Service,
		cli client.TxnClient,
		aicm *defines.AutoIncrCacheManager,
		messageAcquirer func() morpc.Message) error {
		return nil
	}
	for _, opt := range options {
		opt(srv)
	}

	// TODO: the global client needs to be refactored
	err = cnclient.NewCNClient(
		srv.pipelineServiceServiceAddr(),
		&cnclient.ClientConfig{RPC: cfg.RPC})
	if err != nil {
		panic(err)
	}
	return srv, nil
}

// Start starts the query service, the frontend MO server and the pipeline RPC server.
func (s *service) Start() error {
	s.initSqlWriterFactory()

	if err := s.queryService.Start(); err != nil {
		return err
	}

	err := s.runMoServer()
	if err != nil {
		return err
	}

	return s.server.Start()
}

// Close shuts the service down and releases the resources it holds.
func (s *service) Close() error {
	defer logutil.LogClose(s.logger, "cnservice")()

	s.stopper.Stop()
	if err := s.bootstrapService.Close(); err != nil {
		return err
	}
	if err := s.stopFrontend(); err != nil {
		return err
	}
	if err := s.stopTask(); err != nil {
		return err
	}
	if err := s.stopRPCs(); err != nil {
		return err
	}
	// stop I/O pipeline
	blockio.Stop()

	if s.gossipNode != nil {
		if err := s.gossipNode.Leave(time.Second); err != nil {
			return err
		}
	}

	if err := s.server.Close(); err != nil {
		return err
	}
	return s.lockService.Close()
}

// ID implements the frontend.BaseService interface.
func (s *service) ID() string {
	return s.cfg.UUID
}

// SQLAddress implements the frontend.BaseService interface.
func (s *service) SQLAddress() string {
	return s.cfg.SQLAddress
}

// SessionMgr implements the frontend.BaseService interface.
func (s *service) SessionMgr() *queryservice.SessionManager {
	return s.sessionMgr
}

func (s *service) CheckTenantUpgrade(_ context.Context, tenantID int64) error {
	finalVersion := s.GetFinalVersion()
	tenantFetchFunc := func() (int32, string, error) {
		return int32(tenantID), finalVersion, nil
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	if _, err := s.bootstrapService.MaybeUpgradeTenant(ctx, tenantFetchFunc, nil); err != nil {
		return err
	}
	return nil
}

// UpgradeTenant is the entrance for manually issued tenant upgrade commands.
func (s *service) UpgradeTenant(ctx context.Context, tenantName string, retryCount uint32, isALLAccount bool) error {
	ctx, cancel := context.WithTimeout(ctx, time.Minute*120)
	defer cancel()
	if _, err := s.bootstrapService.UpgradeTenant(ctx, tenantName, retryCount, isALLAccount); err != nil {
		return err
	}
	return nil
}

func (s *service) GetFinalVersion() string {
	return s.bootstrapService.GetFinalVersion()
}

func (s *service) stopFrontend() error {
	defer logutil.LogClose(s.logger, "cnservice/frontend")()

	if err := s.serverShutdown(true); err != nil {
		return err
	}
	s.cancelMoServerFunc()
	return nil
}

func (s *service) stopRPCs() error {
	if s._txnClient != nil {
		if err := s._txnClient.Close(); err != nil {
			return err
		}
	}
	if s._hakeeperClient != nil {
		s.moCluster.Close()
		if err := s._hakeeperClient.Close(); err != nil {
			return err
		}
	}
	if s._txnSender != nil {
		if err := s._txnSender.Close(); err != nil {
			return err
		}
	}
	if s.lockService != nil {
		if err := s.lockService.Close(); err != nil {
			return err
		}
	}
	if s.queryService != nil {
		if err := s.queryService.Close(); err != nil {
			return err
		}
	}
	if s.queryClient != nil {
		if err := s.queryClient.Close(); err != nil {
			return err
		}
	}
	s.timestampWaiter.Close()
	return nil
}

func (s *service) acquireMessage() morpc.Message {
	return s.responsePool.Get().(*pipeline.Message)
}

func (s *service) releaseMessage(m *pipeline.Message) {
	if s.responsePool != nil {
		m.Reset()
		s.responsePool.Put(m)
	}
}

func (s *service) handleRequest(
	ctx context.Context,
	value morpc.RPCMessage,
	_ uint64,
	cs morpc.ClientSession) error {
	req := value.Message
	msg, ok := req.(*pipeline.Message)
	if !ok {
		logutil.Errorf("cn server should receive *pipeline.Message, but get %v", req)
		panic("cn server receive a message with unexpected type")
	}
	switch msg.GetSid() {
	case pipeline.Status_WaitingNext:
		return handleWaitingNextMsg(ctx, req, cs)
	case pipeline.Status_Last:
		if msg.IsPipelineMessage() { // only pipeline-type messages need to be assembled for now.
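			// A Status_Last message may be the final segment of a multi-part
			// pipeline payload; merge any segments cached for this message ID
			// into it before dispatching.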
			if err := handleAssemblePipeline(ctx, req, cs); err != nil {
				return err
			}
		}
	}

	go func() {
		defer value.Cancel()
		s.pipelines.counter.Add(1)
		defer s.pipelines.counter.Add(-1)
		s.requestHandler(ctx,
			s.pipelineServiceServiceAddr(),
			req,
			cs,
			s.storeEngine,
			s.fileService,
			s.lockService,
			s.queryClient,
			s._hakeeperClient,
			s.udfService,
			s._txnClient,
			s.aicm,
			s.acquireMessage)
	}()
	return nil
}

func (s *service) initMOServer(ctx context.Context, pu *config.ParameterUnit, aicm *defines.AutoIncrCacheManager) error {
	var err error
	logutil.Infof("Shutdown The Server With Ctrl+C | Ctrl+\\.")
	cancelMoServerCtx, cancelMoServerFunc := context.WithCancel(ctx)
	s.cancelMoServerFunc = cancelMoServerFunc

	pu.FileService = s.fileService
	pu.LockService = s.lockService

	logutil.Info("Initialize the engine ...")
	err = s.initEngine(ctx, cancelMoServerCtx, pu)
	if err != nil {
		return err
	}

	s.createMOServer(cancelMoServerCtx, pu, aicm)
	return nil
}

func (s *service) initEngine(
	ctx context.Context,
	cancelMoServerCtx context.Context,
	pu *config.ParameterUnit,
) error {
	switch s.cfg.Engine.Type {

	case EngineDistributedTAE:
		if err := s.initDistributedTAE(cancelMoServerCtx, pu); err != nil {
			return err
		}

	case EngineMemory:
		if err := s.initMemoryEngine(cancelMoServerCtx, pu); err != nil {
			return err
		}

	case EngineNonDistributedMemory:
		if err := s.initMemoryEngineNonDist(cancelMoServerCtx, pu); err != nil {
			return err
		}

	default:
		return moerr.NewInternalError(ctx, "unknown engine type: %s", s.cfg.Engine.Type)

	}

	return s.bootstrap()
}

func (s *service) createMOServer(
	inputCtx context.Context,
	pu *config.ParameterUnit,
	aicm *defines.AutoIncrCacheManager,
) {
	address := fmt.Sprintf("%s:%d", pu.SV.Host, pu.SV.Port)
	moServerCtx := context.WithValue(inputCtx, config.ParameterUnitKey, pu)
	s.mo = frontend.NewMOServer(moServerCtx, address, pu, aicm, s)
}

func (s *service) runMoServer() error {
	return s.mo.Start()
}

func (s *service) serverShutdown(isgraceful bool) error {
	return s.mo.Stop()
}

func (s *service) getHAKeeperClient() (client logservice.CNHAKeeperClient, err error) {
	s.initHakeeperClientOnce.Do(func() {
		s.hakeeperConnected = make(chan struct{})

		ctx, cancel := context.WithTimeout(
			context.Background(),
			s.cfg.HAKeeper.DiscoveryTimeout.Duration,
		)
		defer cancel()
		client, err = logservice.NewCNHAKeeperClient(ctx, s.cfg.HAKeeper.ClientConfig)
		if err != nil {
			return
		}
		s._hakeeperClient = client
		s.initClusterService()
		s.initLockService()

		ss, ok := runtime.ProcessLevelRuntime().GetGlobalVariables(runtime.StatusServer)
		if ok {
			ss.(*status.Server).SetHAKeeperClient(client)
		}

		if err = s.startCNStoreHeartbeat(); err != nil {
			return
		}
	})
	client = s._hakeeperClient
	return
}

func (s *service) initClusterService() {
	s.moCluster = clusterservice.NewMOCluster(s._hakeeperClient,
		s.cfg.Cluster.RefreshInterval.Duration)
	runtime.ProcessLevelRuntime().SetGlobalVariables(runtime.ClusterService, s.moCluster)
}

func (s *service) getTxnSender() (sender rpc.TxnSender, err error) {
	// handleTemp is used to manipulate the memorystorage that stores temporary
	// tables created by sessions. Temporary tables are currently processed
	// locally, so a local-dispatch hook (rpc.WithSenderLocalDispatch) is added
	// to the txn sender.
	handleTemp := func(d metadata.TNShard) rpc.TxnRequestHandleFunc {
		if d.Address != defines.TEMPORARY_TABLE_TN_ADDR {
			return nil
		}

		// read, write, commit and rollback for temporary tables
		return func(ctx context.Context, req *txn.TxnRequest, resp *txn.TxnResponse) (err error) {
			storage, ok := ctx.Value(defines.TemporaryTN{}).(*memorystorage.Storage)
			if !ok {
				panic("tempStorage should never be nil")
			}

			resp.RequestID = req.RequestID
			resp.Txn = &req.Txn
			resp.Method = req.Method
			resp.Flag = req.Flag

			switch req.Method {
			case txn.TxnMethod_Read:
				res, err := storage.Read(
					ctx,
					req.Txn,
					req.CNRequest.OpCode,
					req.CNRequest.Payload,
				)
				if err != nil {
					resp.TxnError = txn.WrapError(err, moerr.ErrTAERead)
				} else {
					payload, err := res.Read()
					if err != nil {
						panic(err)
					}
					resp.CNOpResponse = &txn.CNOpResponse{Payload: payload}
					res.Release()
				}
			case txn.TxnMethod_Write:
				payload, err := storage.Write(
					ctx,
					req.Txn,
					req.CNRequest.OpCode,
					req.CNRequest.Payload,
				)
				if err != nil {
					resp.TxnError = txn.WrapError(err, moerr.ErrTAEWrite)
				} else {
					resp.CNOpResponse = &txn.CNOpResponse{Payload: payload}
				}
			case txn.TxnMethod_Commit:
				_, err = storage.Commit(ctx, req.Txn)
				if err == nil {
					resp.Txn.Status = txn.TxnStatus_Committed
				}
			case txn.TxnMethod_Rollback:
				err = storage.Rollback(ctx, req.Txn)
				if err == nil {
					resp.Txn.Status = txn.TxnStatus_Aborted
				}
			default:
				return moerr.NewNotSupported(ctx, "unknown txn request method: %s", req.Method.String())
			}
			return err
		}
	}

	s.initTxnSenderOnce.Do(func() {
		sender, err = rpc.NewSender(
			s.cfg.RPC,
			runtime.ProcessLevelRuntime(),
			rpc.WithSenderLocalDispatch(handleTemp),
		)
		if err != nil {
			return
		}
		s._txnSender = sender
	})
	sender = s._txnSender
	return
}

func (s *service) getTxnClient() (c client.TxnClient, err error) {
	s.initTxnClientOnce.Do(func() {
		s.timestampWaiter = client.NewTimestampWaiter()

		rt := runtime.ProcessLevelRuntime()
		client.SetupRuntimeTxnOptions(
			rt,
			txn.GetTxnMode(s.cfg.Txn.Mode),
			txn.GetTxnIsolation(s.cfg.Txn.Isolation),
		)
		var sender rpc.TxnSender
		sender, err = s.getTxnSender()
		if err != nil {
			return
		}
		var opts []client.TxnClientCreateOption
		opts = append(opts,
			client.WithTimestampWaiter(s.timestampWaiter))
		if s.cfg.Txn.EnableSacrificingFreshness == 1 {
			opts = append(opts,
				client.WithEnableSacrificingFreshness())
		}
		if s.cfg.Txn.EnableCNBasedConsistency == 1 {
			opts = append(opts,
				client.WithEnableCNBasedConsistency())
		}
		if s.cfg.Txn.EnableRefreshExpression == 1 {
			opts = append(opts,
				client.WithEnableRefreshExpression())
		}
		if s.cfg.Txn.EnableLeakCheck == 1 {
			opts = append(opts, client.WithEnableLeakCheck(
				s.cfg.Txn.MaxActiveAges.Duration,
				func(actives []client.ActiveTxn) {
					name, _ := uuid.NewV7()
					profPath := catalog.BuildProfilePath("routine", name.String())

					for _, txn := range actives {
						fields := []zap.Field{
							zap.String("txn-id", hex.EncodeToString(txn.ID)),
							zap.Time("create-at", txn.CreateAt),
							zap.String("options", txn.Options.String()),
zap.String("profile", profPath), 665 } 666 if txn.Options.InRunSql { 667 //the txn runs sql in compile.Run() and doest not exist 668 v2.TxnLongRunningCounter.Inc() 669 runtime.DefaultRuntime().Logger().Error("found long running txn", fields...) 670 } else if txn.Options.InCommit { 671 v2.TxnInCommitCounter.Inc() 672 runtime.DefaultRuntime().Logger().Error("found txn in commit", fields...) 673 } else if txn.Options.InRollback { 674 v2.TxnInRollbackCounter.Inc() 675 runtime.DefaultRuntime().Logger().Error("found txn in rollback", fields...) 676 } else { 677 v2.TxnLeakCounter.Inc() 678 runtime.DefaultRuntime().Logger().Error("found leak txn", fields...) 679 } 680 } 681 682 SaveProfile(profPath, profile.GOROUTINE, s.etlFS) 683 })) 684 } 685 if s.cfg.Txn.Limit > 0 { 686 opts = append(opts, 687 client.WithTxnLimit(s.cfg.Txn.Limit)) 688 } 689 if s.cfg.Txn.MaxActive > 0 { 690 opts = append(opts, 691 client.WithMaxActiveTxn(s.cfg.Txn.MaxActive)) 692 } 693 if s.cfg.Txn.PkDedupCount > 0 { 694 opts = append(opts, client.WithCheckDup()) 695 } 696 opts = append(opts, 697 client.WithLockService(s.lockService), 698 client.WithNormalStateNoWait(s.cfg.Txn.NormalStateNoWait), 699 client.WithTxnOpenedCallback([]func(op client.TxnOperator){ 700 func(op client.TxnOperator) { 701 trace.GetService().TxnCreated(op) 702 }, 703 }), 704 ) 705 c = client.NewTxnClient( 706 sender, 707 opts...) 708 s._txnClient = c 709 }) 710 c = s._txnClient 711 return 712 } 713 714 func (s *service) initLockService() { 715 cfg := s.getLockServiceConfig() 716 s.lockService = lockservice.NewLockService( 717 cfg, 718 lockservice.WithWait(func() { 719 <-s.hakeeperConnected 720 })) 721 runtime.ProcessLevelRuntime().SetGlobalVariables(runtime.LockService, s.lockService) 722 lockservice.SetLockServiceByServiceID(s.lockService.GetServiceID(), s.lockService) 723 724 ss, ok := runtime.ProcessLevelRuntime().GetGlobalVariables(runtime.StatusServer) 725 if ok { 726 ss.(*status.Server).SetLockService(s.cfg.UUID, s.lockService) 727 } 728 } 729 730 func (s *service) GetSQLExecutor() executor.SQLExecutor { 731 return s.sqlExecutor 732 } 733 734 func (s *service) GetBootstrapService() bootstrap.Service { 735 return s.bootstrapService 736 } 737 738 // put the waiting-next type msg into client session's cache and return directly 739 func handleWaitingNextMsg(ctx context.Context, message morpc.Message, cs morpc.ClientSession) error { 740 msg, _ := message.(*pipeline.Message) 741 switch msg.GetCmd() { 742 case pipeline.Method_PipelineMessage: 743 var cache morpc.MessageCache 744 var err error 745 if cache, err = cs.CreateCache(ctx, message.GetID()); err != nil { 746 return err 747 } 748 cache.Add(message) 749 } 750 return nil 751 } 752 753 func handleAssemblePipeline(ctx context.Context, message morpc.Message, cs morpc.ClientSession) error { 754 var data []byte 755 756 cnt := uint64(0) 757 cache, err := cs.CreateCache(ctx, message.GetID()) 758 if err != nil { 759 return err 760 } 761 for { 762 msg, ok, err := cache.Pop() 763 if err != nil { 764 return err 765 } 766 if !ok { 767 cache.Close() 768 break 769 } 770 if cnt != msg.(*pipeline.Message).GetSequence() { 771 return moerr.NewInternalErrorNoCtx("Pipeline packages passed by morpc are out of order") 772 } 773 cnt++ 774 data = append(data, msg.(*pipeline.Message).GetData()...) 
	}
	msg := message.(*pipeline.Message)
	msg.SetData(append(data, msg.GetData()...))
	return nil
}

func (s *service) initInternalSQlExecutor(mp *mpool.MPool) {
	s.sqlExecutor = compile.NewSQLExecutor(
		s.pipelineServiceServiceAddr(),
		s.storeEngine,
		mp,
		s._txnClient,
		s.fileService,
		s.queryClient,
		s._hakeeperClient,
		s.udfService,
		s.aicm)
	runtime.ProcessLevelRuntime().SetGlobalVariables(runtime.InternalSQLExecutor, s.sqlExecutor)
}

func (s *service) initIncrService() {
	store, err := incrservice.NewSQLStore(s.sqlExecutor)
	if err != nil {
		panic(err)
	}
	incrService := incrservice.NewIncrService(
		s.cfg.UUID,
		store,
		s.cfg.AutoIncrement)
	runtime.ProcessLevelRuntime().SetGlobalVariables(
		runtime.AutoIncrementService,
		incrService)
	incrservice.SetAutoIncrementServiceByID(s.cfg.UUID, incrService)
}

func (s *service) bootstrap() error {
	s.initIncrService()
	s.initTxnTraceService()

	rt := runtime.ProcessLevelRuntime()
	s.bootstrapService = bootstrap.NewService(
		&locker{hakeeperClient: s._hakeeperClient},
		rt.Clock(),
		s._txnClient,
		s.sqlExecutor,
		s.options.bootstrapOptions...)

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5)
	ctx = context.WithValue(ctx, config.ParameterUnitKey, s.pu)
	defer cancel()

	// Bootstrap cannot fail. We panic here to make sure the service cannot start.
	// If bootstrap fails, all data must be cleaned before retrying.
	if err := s.bootstrapService.Bootstrap(ctx); err != nil {
		panic(err)
	}

	trace.GetService().EnableFlush()

	if s.cfg.AutomaticUpgrade {
		return s.stopper.RunTask(func(ctx context.Context) {
			ctx, cancel := context.WithTimeout(ctx, time.Minute*120)
			defer cancel()
			if err := s.bootstrapService.BootstrapUpgrade(ctx); err != nil {
				if err != context.Canceled {
					runtime.DefaultRuntime().Logger().Error("bootstrap system automatic upgrade failed by: ", zap.Error(err))
					//panic(err)
				}
			}
		})
	}
	return nil
}

func (s *service) initTxnTraceService() {
	rt := runtime.ProcessLevelRuntime()
	ts, err := trace.NewService(
		s.options.traceDataPath,
		s.cfg.UUID,
		s._txnClient,
		rt.Clock(),
		s.sqlExecutor,
		trace.WithEnable(s.cfg.Txn.Trace.Enable, s.cfg.Txn.Trace.Tables),
		trace.WithBufferSize(s.cfg.Txn.Trace.BufferSize),
		trace.WithFlushBytes(int(s.cfg.Txn.Trace.FlushBytes)),
		trace.WithFlushDuration(s.cfg.Txn.Trace.FlushDuration.Duration))
	if err != nil {
		panic(err)
	}
	rt.SetGlobalVariables(runtime.TxnTraceService, ts)
}

// SaveProfile saves a profile into the etl fs.
// profileType is defined in pkg/util/profile/profile.go.
func SaveProfile(profilePath string, profileType string, etlFS fileservice.FileService) {
	if len(profilePath) == 0 || len(profileType) == 0 || etlFS == nil {
		return
	}
	reader, writer := io.Pipe()
	go func() {
		// dump all goroutines
		_ = profile.ProfileRuntime(profileType, writer, 2)
		_ = writer.Close()
	}()
	writeVec := fileservice.IOVector{
		FilePath: profilePath,
		Entries: []fileservice.IOEntry{
			{
				Offset:         0,
				ReaderForWrite: reader,
				Size:           -1,
			},
		},
	}
	ctx, cancel := context.WithTimeout(context.TODO(), time.Minute*3)
	defer cancel()
	err := etlFS.Write(ctx, writeVec)
	if err != nil {
		logutil.Errorf("save profile %s failed. err:%v", profilePath, err)
err:%v", profilePath, err) 894 return 895 } 896 } 897 898 type locker struct { 899 hakeeperClient logservice.CNHAKeeperClient 900 } 901 902 func (l *locker) Get( 903 ctx context.Context, 904 key string) (bool, error) { 905 v, err := l.hakeeperClient.AllocateIDByKeyWithBatch(ctx, key, 1) 906 if err != nil { 907 return false, err 908 } 909 return v == 1, nil 910 }