github.com/pingcap/tiup@v1.15.1/components/dm/spec/logic.go

// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"context"
	"crypto/tls"
	"fmt"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/pingcap/tiup/pkg/cluster/ctxt"
	"github.com/pingcap/tiup/pkg/cluster/spec"
	"github.com/pingcap/tiup/pkg/cluster/template/scripts"
	"github.com/pingcap/tiup/pkg/meta"
	"github.com/pingcap/tiup/pkg/utils"
)

// Component names supported by TiUP
const (
	ComponentDMMaster     = spec.ComponentDMMaster
	ComponentDMWorker     = spec.ComponentDMWorker
	ComponentPrometheus   = spec.ComponentPrometheus
	ComponentGrafana      = spec.ComponentGrafana
	ComponentAlertmanager = spec.ComponentAlertmanager
)

type (
	// InstanceSpec represents an instance specification
	InstanceSpec interface {
		Role() string
		SSH() (string, int)
		GetMainPort() int
		IsImported() bool
		IgnoreMonitorAgent() bool
	}
)

// Component represents a component of the cluster.
type Component = spec.Component

// Instance represents an instance
type Instance = spec.Instance

// DMMasterComponent represents the DM master component.
type DMMasterComponent struct{ Topology *Specification }

// Name implements Component interface.
func (c *DMMasterComponent) Name() string {
	return ComponentDMMaster
}

// Role implements Component interface.
func (c *DMMasterComponent) Role() string {
	return ComponentDMMaster
}

// Source implements Component interface.
func (c *DMMasterComponent) Source() string {
	source := c.Topology.ComponentSources.Master
	if source != "" {
		return source
	}
	return ComponentDMMaster
}

// CalculateVersion implements the Component interface
func (c *DMMasterComponent) CalculateVersion(clusterVersion string) string {
	return clusterVersion
}

// SetVersion implements Component interface.
func (c *DMMasterComponent) SetVersion(version string) {
	// not supported now
}

// Instances implements Component interface.
func (c *DMMasterComponent) Instances() []Instance {
	ins := make([]Instance, 0)
	for _, s := range c.Topology.Masters {
		s := s
		ins = append(ins, &MasterInstance{
			Name: s.Name,
			BaseInstance: spec.BaseInstance{
				InstanceSpec: s,
				Name:         c.Name(),
				Host:         s.Host,
				ManageHost:   s.ManageHost,
				ListenHost:   c.Topology.BaseTopo().GlobalOptions.ListenHost,
				Port:         s.Port,
				SSHP:         s.SSHPort,
				Source:       s.Source,

				Ports: []int{
					s.Port,
					s.PeerPort,
				},
				Dirs: []string{
					s.DeployDir,
					s.DataDir,
				},
				StatusFn: s.Status,
				UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration {
					return spec.UptimeByHost(s.Host, s.Port, timeout, tlsCfg)
				},
				Component: c,
			},
			topo: c.Topology,
		})
	}
	return ins
}

// MasterInstance represents the DM master instance
type MasterInstance struct {
	Name string
	spec.BaseInstance
	topo *Specification
}

// InitConfig implements the Instance interface
func (i *MasterInstance) InitConfig(
	ctx context.Context,
	e ctxt.Executor,
	clusterName,
	clusterVersion,
	deployUser string,
	paths meta.DirPaths,
) error {
	if err := i.BaseInstance.InitConfig(ctx, e, i.topo.GlobalOptions, deployUser, paths); err != nil {
		return err
	}

	enableTLS := i.topo.GlobalOptions.TLSEnabled
	spec := i.InstanceSpec.(*MasterSpec)
	scheme := utils.Ternary(enableTLS, "https", "http").(string)

	initialCluster := []string{}
	for _, masterspec := range i.topo.Masters {
		initialCluster = append(initialCluster, fmt.Sprintf("%s=%s", masterspec.Name, masterspec.GetAdvertisePeerURL(enableTLS)))
	}
	cfg := &scripts.DMMasterScript{
		Name:             spec.Name,
		V1SourcePath:     spec.V1SourcePath,
		MasterAddr:       utils.JoinHostPort(i.GetListenHost(), spec.Port),
		AdvertiseAddr:    utils.JoinHostPort(spec.Host, spec.Port),
		PeerURL:          fmt.Sprintf("%s://%s", scheme, utils.JoinHostPort(i.GetListenHost(), spec.PeerPort)),
		AdvertisePeerURL: spec.GetAdvertisePeerURL(enableTLS),
		InitialCluster:   strings.Join(initialCluster, ","),
		DeployDir:        paths.Deploy,
		DataDir:          paths.Data[0],
		LogDir:           paths.Log,
		NumaNode:         spec.NumaNode,
	}

	fp := filepath.Join(paths.Cache, fmt.Sprintf("run_dm-master_%s_%d.sh", i.GetHost(), i.GetPort()))
	if err := cfg.ConfigToFile(fp); err != nil {
		return err
	}
	dst := filepath.Join(paths.Deploy, "scripts", "run_dm-master.sh")
	if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil {
		return err
	}
	_, _, err := e.Execute(ctx, "chmod +x "+dst, false)
	if err != nil {
		return err
	}

	if spec.Config, err = i.setTLSConfig(ctx, enableTLS, spec.Config, paths); err != nil {
		return err
	}

	specConfig := spec.Config
	return i.MergeServerConfig(ctx, e, i.topo.ServerConfigs.Master, specConfig, paths)
}

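// Illustrative note (not part of the original source): with TLS disabled and
// two masters named "master1" (10.0.1.1, peer port 8291) and "master2"
// (10.0.1.2, peer port 8291), the InitialCluster string built above would
// render as
//
//	master1=http://10.0.1.1:8291,master2=http://10.0.1.2:8291
//
// i.e. a comma-separated name=advertise-peer-URL list, one entry per master.
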
// setTLSConfig sets the TLS configuration to support enabling/disabling TLS
// for the MasterInstance
func (i *MasterInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) {
	// set TLS configs
	if enableTLS {
		if configs == nil {
			configs = make(map[string]any)
		}
		configs["ssl-ca"] = fmt.Sprintf(
			"%s/tls/%s",
			paths.Deploy,
			"ca.crt",
		)
		configs["ssl-cert"] = fmt.Sprintf(
			"%s/tls/%s.crt",
			paths.Deploy,
			i.Role())
		configs["ssl-key"] = fmt.Sprintf(
			"%s/tls/%s.pem",
			paths.Deploy,
			i.Role())
	} else {
		// dm-master tls config list
		tlsConfigs := []string{
			"ssl-ca",
			"ssl-cert",
			"ssl-key",
		}
		// delete TLS configs
		if configs != nil {
			for _, config := range tlsConfigs {
				delete(configs, config)
			}
		}
	}

	return configs, nil
}

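// Illustrative note (not part of the original source): assuming a hypothetical
// deploy directory of /dm-deploy/dm-master-8261, enabling TLS above yields
//
//	ssl-ca:   /dm-deploy/dm-master-8261/tls/ca.crt
//	ssl-cert: /dm-deploy/dm-master-8261/tls/dm-master.crt
//	ssl-key:  /dm-deploy/dm-master-8261/tls/dm-master.pem
//
// since Role() returns "dm-master"; disabling TLS deletes the same three keys.
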
// ScaleConfig deploys a temporary config on scaling
func (i *MasterInstance) ScaleConfig(
	ctx context.Context,
	e ctxt.Executor,
	topo spec.Topology,
	clusterName,
	clusterVersion,
	deployUser string,
	paths meta.DirPaths,
) error {
	if err := i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths); err != nil {
		return err
	}

	enableTLS := i.topo.GlobalOptions.TLSEnabled
	spec := i.InstanceSpec.(*MasterSpec)
	scheme := utils.Ternary(enableTLS, "https", "http").(string)

	masters := []string{}
	// master list from the existing topology file
	for _, masterspec := range topo.(*Specification).Masters {
		masters = append(masters, utils.JoinHostPort(masterspec.Host, masterspec.Port))
	}
	cfg := &scripts.DMMasterScaleScript{
		Name:             spec.Name,
		V1SourcePath:     spec.V1SourcePath,
		MasterAddr:       utils.JoinHostPort(i.GetListenHost(), spec.Port),
		AdvertiseAddr:    utils.JoinHostPort(spec.Host, spec.Port),
		PeerURL:          fmt.Sprintf("%s://%s", scheme, utils.JoinHostPort(i.GetListenHost(), spec.PeerPort)),
		AdvertisePeerURL: spec.GetAdvertisePeerURL(enableTLS),
		Join:             strings.Join(masters, ","),
		DeployDir:        paths.Deploy,
		DataDir:          paths.Data[0],
		LogDir:           paths.Log,
		NumaNode:         spec.NumaNode,
	}

	fp := filepath.Join(paths.Cache, fmt.Sprintf("run_dm-master_%s_%d.sh", i.GetHost(), i.GetPort()))
	if err := cfg.ConfigToFile(fp); err != nil {
		return err
	}

	dst := filepath.Join(paths.Deploy, "scripts", "run_dm-master.sh")
	if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil {
		return err
	}
	if _, _, err := e.Execute(ctx, "chmod +x "+dst, false); err != nil {
		return err
	}

	return nil
}

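// Illustrative note (not part of the original source): unlike InitConfig,
// which builds an initial-cluster list of peer URLs for bootstrapping, the
// scale-out script joins an already-running cluster, so Join above would
// render to something like "10.0.1.1:8261,10.0.1.2:8261" — the client
// addresses of the existing masters, not their peer URLs.
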
// DMWorkerComponent represents the DM worker component.
type DMWorkerComponent struct{ Topology *Specification }

// Name implements Component interface.
func (c *DMWorkerComponent) Name() string {
	return ComponentDMWorker
}

// Role implements Component interface.
func (c *DMWorkerComponent) Role() string {
	return ComponentDMWorker
}

// Source implements Component interface.
func (c *DMWorkerComponent) Source() string {
	source := c.Topology.ComponentSources.Worker
	if source != "" {
		return source
	}
	return ComponentDMWorker
}

// CalculateVersion implements the Component interface
func (c *DMWorkerComponent) CalculateVersion(clusterVersion string) string {
	return clusterVersion
}

// SetVersion implements Component interface.
func (c *DMWorkerComponent) SetVersion(version string) {
	// not supported now
}

// Instances implements Component interface.
func (c *DMWorkerComponent) Instances() []Instance {
	ins := make([]Instance, 0)
	for _, s := range c.Topology.Workers {
		s := s
		ins = append(ins, &WorkerInstance{
			Name: s.Name,
			BaseInstance: spec.BaseInstance{
				InstanceSpec: s,
				Name:         c.Name(),
				Host:         s.Host,
				ManageHost:   s.ManageHost,
				ListenHost:   c.Topology.BaseTopo().GlobalOptions.ListenHost,
				Port:         s.Port,
				SSHP:         s.SSHPort,
				Source:       s.Source,

				Ports: []int{
					s.Port,
				},
				Dirs: []string{
					s.DeployDir,
					s.DataDir,
				},
				StatusFn: s.Status,
				UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration {
					return spec.UptimeByHost(s.Host, s.Port, timeout, tlsCfg)
				},
				Component: c,
			},
			topo: c.Topology,
		})
	}

	return ins
}

// WorkerInstance represents the DM worker instance
type WorkerInstance struct {
	Name string
	spec.BaseInstance
	topo *Specification
}

// InitConfig implements the Instance interface
func (i *WorkerInstance) InitConfig(
	ctx context.Context,
	e ctxt.Executor,
	clusterName,
	clusterVersion,
	deployUser string,
	paths meta.DirPaths,
) error {
	if err := i.BaseInstance.InitConfig(ctx, e, i.topo.GlobalOptions, deployUser, paths); err != nil {
		return err
	}

	enableTLS := i.topo.GlobalOptions.TLSEnabled
	spec := i.InstanceSpec.(*WorkerSpec)

	masters := []string{}
	for _, masterspec := range i.topo.Masters {
		masters = append(masters, utils.JoinHostPort(masterspec.Host, masterspec.Port))
	}
	cfg := &scripts.DMWorkerScript{
		Name:          i.Name,
		WorkerAddr:    utils.JoinHostPort(i.GetListenHost(), spec.Port),
		AdvertiseAddr: utils.JoinHostPort(spec.Host, spec.Port),
		Join:          strings.Join(masters, ","),

		DeployDir: paths.Deploy,
		LogDir:    paths.Log,
		NumaNode:  spec.NumaNode,
	}

	fp := filepath.Join(paths.Cache, fmt.Sprintf("run_dm-worker_%s_%d.sh", i.GetHost(), i.GetPort()))
	if err := cfg.ConfigToFile(fp); err != nil {
		return err
	}
	dst := filepath.Join(paths.Deploy, "scripts", "run_dm-worker.sh")

	if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil {
		return err
	}

	_, _, err := e.Execute(ctx, "chmod +x "+dst, false)
	if err != nil {
		return err
	}

	if spec.Config, err = i.setTLSConfig(ctx, enableTLS, spec.Config, paths); err != nil {
		return err
	}

	specConfig := spec.Config
	return i.MergeServerConfig(ctx, e, i.topo.ServerConfigs.Worker, specConfig, paths)
}

// setTLSConfig sets the TLS configuration to support enabling/disabling TLS
// for the WorkerInstance
func (i *WorkerInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) {
	// set TLS configs
	if enableTLS {
		if configs == nil {
			configs = make(map[string]any)
		}
		configs["ssl-ca"] = fmt.Sprintf(
			"%s/tls/%s",
			paths.Deploy,
			"ca.crt",
		)
		configs["ssl-cert"] = fmt.Sprintf(
			"%s/tls/%s.crt",
			paths.Deploy,
			i.Role())
		configs["ssl-key"] = fmt.Sprintf(
			"%s/tls/%s.pem",
			paths.Deploy,
			i.Role())
	} else {
		// dm-worker tls config list
		tlsConfigs := []string{
			"ssl-ca",
			"ssl-cert",
			"ssl-key",
		}
		// delete TLS configs
		if configs != nil {
			for _, config := range tlsConfigs {
				delete(configs, config)
			}
		}
	}

	return configs, nil
}

// ScaleConfig deploys a temporary config on scaling
func (i *WorkerInstance) ScaleConfig(
	ctx context.Context,
	e ctxt.Executor,
	topo spec.Topology,
	clusterName,
	clusterVersion,
	deployUser string,
	paths meta.DirPaths,
) error {
	s := i.topo
	defer func() {
		i.topo = s
	}()
	i.topo = topo.(*Specification)
	return i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths)
}

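// Illustrative note (not part of the original source): the save/restore above
// temporarily points i.topo at the scale-out topology so that InitConfig
// joins the masters listed in that topology; the deferred assignment then
// restores the instance's original topology before returning.
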
// GetGlobalOptions returns the global options of the cluster topology
func (topo *Specification) GetGlobalOptions() spec.GlobalOptions {
	return topo.GlobalOptions
}

// GetMonitoredOptions returns MonitoredOptions
func (topo *Specification) GetMonitoredOptions() *spec.MonitoredOptions {
	return topo.MonitoredOptions
}

// ComponentsByStopOrder returns components in the order they need to be stopped.
func (topo *Specification) ComponentsByStopOrder() (comps []Component) {
	comps = topo.ComponentsByStartOrder()
	// reverse the order
	i := 0
	j := len(comps) - 1
	for i < j {
		comps[i], comps[j] = comps[j], comps[i]
		i++
		j--
	}
	return
}

// ComponentsByStartOrder returns components in the order they need to be started.
func (topo *Specification) ComponentsByStartOrder() (comps []Component) {
	// "dm-master", "dm-worker"
	comps = append(comps, &DMMasterComponent{topo})
	comps = append(comps, &DMWorkerComponent{topo})
	comps = append(comps, &spec.MonitorComponent{Topology: topo}) // prometheus
	comps = append(comps, &spec.GrafanaComponent{Topology: topo})
	comps = append(comps, &spec.AlertManagerComponent{Topology: topo})
	return
}

// ComponentsByUpdateOrder returns components in the order they need to be updated.
func (topo *Specification) ComponentsByUpdateOrder(curVer string) (comps []Component) {
	// "dm-master", "dm-worker"
	comps = append(comps, &DMMasterComponent{topo})
	comps = append(comps, &DMWorkerComponent{topo})
	comps = append(comps, &spec.MonitorComponent{Topology: topo})
	comps = append(comps, &spec.GrafanaComponent{Topology: topo})
	comps = append(comps, &spec.AlertManagerComponent{Topology: topo})
	return
}

// IterComponent iterates all components in component starting order
func (topo *Specification) IterComponent(fn func(comp Component)) {
	for _, comp := range topo.ComponentsByStartOrder() {
		fn(comp)
	}
}

// IterInstance iterates all instances in component starting order
func (topo *Specification) IterInstance(fn func(instance Instance), concurrency ...int) {
	maxWorkers := 1
	wg := sync.WaitGroup{}
	if len(concurrency) > 0 && concurrency[0] > 1 {
		maxWorkers = concurrency[0]
	}
	workerPool := make(chan struct{}, maxWorkers)

	for _, comp := range topo.ComponentsByStartOrder() {
		for _, inst := range comp.Instances() {
			wg.Add(1)
			workerPool <- struct{}{}
			go func(inst Instance) {
				defer func() {
					<-workerPool
					wg.Done()
				}()
				fn(inst)
			}(inst)
		}
	}
	wg.Wait()
}

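// Usage sketch (illustrative, not part of the original source): the buffered
// channel acts as a semaphore that bounds concurrency, so a caller can fan
// out over all instances with a cap on parallel goroutines, e.g.
//
//	topo.IterInstance(func(inst Instance) {
//		fmt.Println(inst.ID()) // ID is part of the spec.Instance interface
//	}, 16) // at most 16 instances are visited concurrently
//
// Omitting the concurrency argument (or passing a value <= 1) keeps the
// iteration effectively serial.
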
// IterHost iterates one instance for each host
func (topo *Specification) IterHost(fn func(instance Instance)) {
	hostMap := make(map[string]bool)
	for _, comp := range topo.ComponentsByStartOrder() {
		for _, inst := range comp.Instances() {
			host := inst.GetHost()
			_, ok := hostMap[host]
			if !ok {
				hostMap[host] = true
				fn(inst)
			}
		}
	}
}
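
// Illustrative note (not part of the original source): deduplication is by
// Host alone, so when several instances share a machine only the first one
// encountered in component start order is passed to fn, which makes IterHost
// suitable for actions that should run once per machine.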