github.com/1aal/kubeblocks@v0.0.0-20231107070852-e1c03e598921/pkg/cli/cmd/cluster/create.go (about)

     1  /*
     2  Copyright (C) 2022-2023 ApeCloud Co., Ltd
     3  
     4  This file is part of KubeBlocks project
     5  
     6  This program is free software: you can redistribute it and/or modify
     7  it under the terms of the GNU Affero General Public License as published by
     8  the Free Software Foundation, either version 3 of the License, or
     9  (at your option) any later version.
    10  
     11  This program is distributed in the hope that it will be useful,
    12  but WITHOUT ANY WARRANTY; without even the implied warranty of
    13  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    14  GNU Affero General Public License for more details.
    15  
    16  You should have received a copy of the GNU Affero General Public License
    17  along with this program.  If not, see <http://www.gnu.org/licenses/>.
    18  */
    19  
    20  package cluster
    21  
    22  import (
    23  	"context"
    24  	"encoding/json"
    25  	"fmt"
    26  	"io"
    27  	"math"
    28  	"net/http"
    29  	"os"
    30  	"regexp"
    31  	"strconv"
    32  	"strings"
    33  	"time"
    34  
    35  	"github.com/ghodss/yaml"
    36  	"github.com/robfig/cron/v3"
    37  	"github.com/spf13/cobra"
    38  	"github.com/spf13/pflag"
    39  	corev1 "k8s.io/api/core/v1"
    40  	"k8s.io/apimachinery/pkg/api/errors"
    41  	"k8s.io/apimachinery/pkg/api/resource"
    42  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    43  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    44  	"k8s.io/apimachinery/pkg/runtime"
    45  	"k8s.io/cli-runtime/pkg/genericiooptions"
    46  	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
    47  	rbacv1ac "k8s.io/client-go/applyconfigurations/rbac/v1"
    48  	"k8s.io/client-go/dynamic"
    49  	"k8s.io/klog/v2"
    50  	cmdutil "k8s.io/kubectl/pkg/cmd/util"
    51  	utilcomp "k8s.io/kubectl/pkg/util/completion"
    52  	"k8s.io/kubectl/pkg/util/storage"
    53  	"k8s.io/kubectl/pkg/util/templates"
    54  
    55  	appsv1alpha1 "github.com/1aal/kubeblocks/apis/apps/v1alpha1"
    56  	dpv1alpha1 "github.com/1aal/kubeblocks/apis/dataprotection/v1alpha1"
    57  
    58  	"github.com/1aal/kubeblocks/pkg/class"
    59  	"github.com/1aal/kubeblocks/pkg/cli/cluster"
    60  	classutil "github.com/1aal/kubeblocks/pkg/cli/cmd/class"
    61  	"github.com/1aal/kubeblocks/pkg/cli/create"
    62  	"github.com/1aal/kubeblocks/pkg/cli/printer"
    63  	"github.com/1aal/kubeblocks/pkg/cli/types"
    64  	"github.com/1aal/kubeblocks/pkg/cli/util"
    65  	"github.com/1aal/kubeblocks/pkg/constant"
    66  	dptypes "github.com/1aal/kubeblocks/pkg/dataprotection/types"
    67  	"github.com/1aal/kubeblocks/pkg/dataprotection/utils/boolptr"
    68  	viper "github.com/1aal/kubeblocks/pkg/viperx"
    69  )
    70  
    71  var clusterCreateExample = templates.Examples(`
    72  	# Create a cluster with cluster definition apecloud-mysql and cluster version ac-mysql-8.0.30
    73  	kbcli cluster create mycluster --cluster-definition apecloud-mysql --cluster-version ac-mysql-8.0.30
    74  
     75  	# --cluster-definition is required; if --cluster-version is not specified, the most recently created version is used
    76  	kbcli cluster create mycluster --cluster-definition apecloud-mysql
    77  
    78  	# Output resource information in YAML format, without creation of resources.
    79  	kbcli cluster create mycluster --cluster-definition apecloud-mysql --dry-run -o yaml
    80  
    81  	# Output resource information in YAML format, the information will be sent to the server
    82  	# but the resources will not be actually created.
    83  	kbcli cluster create mycluster --cluster-definition apecloud-mysql --dry-run=server -o yaml
    84  	
    85  	# Create a cluster and set termination policy DoNotTerminate that prevents the cluster from being deleted
    86  	kbcli cluster create mycluster --cluster-definition apecloud-mysql --termination-policy DoNotTerminate
    87  
     88  	# To delete resources such as statefulsets, deployments, services and pdb but keep PVCs
     89  	# when deleting the cluster, use termination policy Halt
    90  	kbcli cluster create mycluster --cluster-definition apecloud-mysql --termination-policy Halt
    91  
     92  	# To delete resources such as statefulsets, deployments, services and pdb, including
     93  	# PVCs, when deleting the cluster, use termination policy Delete
    94  	kbcli cluster create mycluster --cluster-definition apecloud-mysql --termination-policy Delete
    95  
     96  	# To delete all resources, including all snapshots and snapshot data, when deleting
     97  	# the cluster, use termination policy WipeOut
    98  	kbcli cluster create mycluster --cluster-definition apecloud-mysql --termination-policy WipeOut
    99  
   100  	# Create a cluster and set cpu to 1 core, memory to 1Gi, storage size to 20Gi and replicas to 3
   101  	kbcli cluster create mycluster --cluster-definition apecloud-mysql --set cpu=1,memory=1Gi,storage=20Gi,replicas=3
   102  
   103  	# Create a cluster and set storageClass to csi-hostpath-sc, if storageClass is not specified,
   104  	# the default storage class will be used
   105  	kbcli cluster create mycluster --cluster-definition apecloud-mysql --set storageClass=csi-hostpath-sc
   106  
   107  	# Create a cluster with replicationSet workloadType and set switchPolicy to Noop
   108  	kbcli cluster create mycluster --cluster-definition postgresql --set switchPolicy=Noop
   109  
   110  	# Create a cluster with more than one component, use "--set type=component-name" to specify the component,
   111  	# if not specified, the main component will be used, run "kbcli cd list-components CLUSTER-DEFINITION-NAME"
   112  	# to show the components in the cluster definition
   113  	kbcli cluster create mycluster --cluster-definition redis --set type=redis,cpu=1 --set type=redis-sentinel,cpu=200m
   114  
   115  	# Create a cluster and use a URL to set cluster resource
   116  	kbcli cluster create mycluster --cluster-definition apecloud-mysql \
   117  		--set-file https://kubeblocks.io/yamls/apecloud-mysql.yaml
   118  
   119  	# Create a cluster and load cluster resource set from stdin
   120  	cat << EOF | kbcli cluster create mycluster --cluster-definition apecloud-mysql --set-file -
   121  	- name: my-test ...
   122  
    123  	# Create a cluster with pods scattered across nodes
   124  	kbcli cluster create --cluster-definition apecloud-mysql --topology-keys kubernetes.io/hostname \
   125  		--pod-anti-affinity Required
   126  
    127  	# Create a cluster on nodes with specific labels
   128  	kbcli cluster create --cluster-definition apecloud-mysql \
   129  		--node-labels '"topology.kubernetes.io/zone=us-east-1a","disktype=ssd,essd"'
   130  
    131  	# Create a cluster with two tolerations
    132  	kbcli cluster create --cluster-definition apecloud-mysql --tolerations '"engineType=mongo:NoSchedule","diskType=ssd:NoSchedule"'
   133  
    134      # Create a cluster, with each pod running on its own dedicated node
   135      kbcli cluster create --cluster-definition apecloud-mysql --tenancy=DedicatedNode
   136  
    137      # Create a cluster from a backup to restore data
   138      kbcli cluster create --backup backup-default-mycluster-20230616190023
   139  
    140      # Create a cluster and restore it to a specific point in time
   141      kbcli cluster create --restore-to-time "Jun 16,2023 18:58:53 UTC+0800" --source-cluster mycluster
   142  
   143  	# Create a cluster with auto backup
   144  	kbcli cluster create --cluster-definition apecloud-mysql --backup-enabled
   145  
    146  	# Create a cluster whose default component has multiple storage volumes
   147  	kbcli cluster create --cluster-definition oceanbase --pvc name=data-file,size=50Gi --pvc name=data-log,size=50Gi --pvc name=log,size=20Gi
   148  
    149  	# Create a cluster specifying a component with multiple storage volumes
   150  	kbcli cluster create --cluster-definition pulsar --pvc type=bookies,name=ledgers,size=20Gi --pvc type=bookies,name=journal,size=20Gi
   151  
    152  	# Create a cluster using a service reference to another KubeBlocks cluster
   153  	kbcli cluster create --cluster-definition pulsar --service-reference name=pulsarZookeeper,cluster=zookeeper,namespace=default
   154  `)
   155  
   156  const (
   157  	CueTemplateName = "cluster_template.cue"
   158  	monitorKey      = "monitor"
   159  	apeCloudMysql   = "apecloud-mysql"
   160  )
   161  
   162  type setKey string
   163  
   164  const (
   165  	keyType         setKey = "type"
   166  	keyCPU          setKey = "cpu"
   167  	keyClass        setKey = "class"
   168  	keyMemory       setKey = "memory"
   169  	keyReplicas     setKey = "replicas"
   170  	keyStorage      setKey = "storage"
   171  	keyStorageClass setKey = "storageClass"
   172  	keySwitchPolicy setKey = "switchPolicy"
   173  	keyUnknown      setKey = "unknown"
   174  )
   175  
   176  var setKeyCfg = map[setKey]string{
   177  	keyCPU:      types.CfgKeyClusterDefaultCPU,
   178  	keyMemory:   types.CfgKeyClusterDefaultMemory,
   179  	keyStorage:  types.CfgKeyClusterDefaultStorageSize,
   180  	keyReplicas: types.CfgKeyClusterDefaultReplicas,
   181  }
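
         // Illustrative sketch (simplified, omitting the workload-specific defaults) of how these
         // config keys are used by getVal in buildClusterComp when a key is not given via --set:
         //
         //	if v := sets[keyCPU]; len(v) > 0 { return v }   // an explicit --set value wins
         //	return viper.GetString(setKeyCfg[keyCPU])       // otherwise use the configured default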
   182  
    183  // As more database engines are supported, specifying storage capacity only through --set
    184  // no longer meets demand, because many clusters' components are set up with multiple PVCs, so storage settings are split out of `--set` into `--pvc`.
   185  type storageKey string
   186  
    187  // Each '--pvc' value in CreateOptions.Storages is parsed into a map[storageKey]string, grouped by component.
    188  const (
    189  	// storageKeyType is the component key of a '--pvc' value, referring to a cluster component name
   190  	storageKeyType storageKey = "type"
   191  	// storageKeyName is the name of a pvc in volumeClaimTemplates, like "data" or "log"
   192  	storageKeyName storageKey = "name"
   193  	// storageKeyStorageClass is the storageClass of a pvc
   194  	storageKeyStorageClass storageKey = "storageClass"
    195  	// storageAccessMode is the access mode of a pvc, one of ReadWriteOnce, ReadOnlyMany or ReadWriteMany.
   196  	// more information in https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes
   197  	storageAccessMode storageKey = "mode"
   198  	// storageKeySize is the size of a pvc
   199  	storageKeySize storageKey = "size"
   200  
   201  	storageKeyUnknown storageKey = "unknown"
   202  )
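
         // For illustration: a flag value like
         //	--pvc type=mysql,name=data,mode=ReadWriteOnce,size=20Gi
         // is parsed (by buildCompStorages) into one map[storageKey]string entry for the "mysql"
         // component, roughly {type: "mysql", name: "data", mode: "ReadWriteOnce", size: "20Gi"};
         // several --pvc flags for the same component accumulate into a slice of such maps.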
   203  
    204  // UpdatableFlags is the set of flags that can be updated by the update command
   205  type UpdatableFlags struct {
   206  	// Options for cluster termination policy
   207  	TerminationPolicy string `json:"terminationPolicy"`
   208  
   209  	// Add-on switches for cluster observability
   210  	MonitoringInterval uint8 `json:"monitor"`
   211  	EnableAllLogs      bool  `json:"enableAllLogs"`
   212  
   213  	// Configuration and options for cluster affinity and tolerations
   214  	PodAntiAffinity string `json:"podAntiAffinity"`
    215  	// TopologyKeys carries the omitempty json tag because CueLang cannot convert null to a list when TopologyKeys is nil.
   216  	TopologyKeys   []string          `json:"topologyKeys,omitempty"`
   217  	NodeLabels     map[string]string `json:"nodeLabels,omitempty"`
   218  	Tenancy        string            `json:"tenancy"`
   219  	TolerationsRaw []string          `json:"-"`
   220  
   221  	// backup config
   222  	BackupEnabled                 bool   `json:"-"`
   223  	BackupRetentionPeriod         string `json:"-"`
   224  	BackupMethod                  string `json:"-"`
   225  	BackupCronExpression          string `json:"-"`
   226  	BackupStartingDeadlineMinutes int64  `json:"-"`
   227  	BackupRepoName                string `json:"-"`
   228  	BackupPITREnabled             bool   `json:"-"`
   229  }
   230  
   231  type CreateOptions struct {
   232  	// ClusterDefRef reference clusterDefinition
   233  	ClusterDefRef     string                   `json:"clusterDefRef"`
   234  	ClusterVersionRef string                   `json:"clusterVersionRef"`
   235  	Tolerations       []interface{}            `json:"tolerations,omitempty"`
   236  	ComponentSpecs    []map[string]interface{} `json:"componentSpecs"`
   237  	Annotations       map[string]string        `json:"annotations,omitempty"`
   238  	SetFile           string                   `json:"-"`
   239  	Values            []string                 `json:"-"`
   240  	RBACEnabled       bool                     `json:"-"`
   241  	Storages          []string                 `json:"-"`
   242  	ServiceRef        []string                 `json:"-"`
   243  
   244  	// backup name to restore in creation
   245  	Backup              string `json:"backup,omitempty"`
   246  	RestoreTime         string `json:"restoreTime,omitempty"`
   247  	VolumeRestorePolicy string `json:"-"`
   248  
   249  	// backup config
   250  	BackupConfig *appsv1alpha1.ClusterBackup `json:"backupConfig,omitempty"`
   251  
   252  	Cmd *cobra.Command `json:"-"`
   253  
   254  	UpdatableFlags
   255  	create.CreateOptions `json:"-"`
   256  }
   257  
   258  func NewCreateCmd(f cmdutil.Factory, streams genericiooptions.IOStreams) *cobra.Command {
   259  	o := NewCreateOptions(f, streams)
   260  	cmd := &cobra.Command{
   261  		Use:     "create [NAME]",
   262  		Short:   "Create a cluster.",
   263  		Example: clusterCreateExample,
   264  		Run: func(cmd *cobra.Command, args []string) {
   265  			o.Args = args
   266  			cmdutil.CheckErr(o.CreateOptions.Complete())
   267  			cmdutil.CheckErr(o.Complete())
   268  			cmdutil.CheckErr(o.Validate())
   269  			cmdutil.CheckErr(o.Run())
   270  		},
   271  	}
   272  
   273  	cmd.Flags().StringVar(&o.ClusterDefRef, "cluster-definition", "", "Specify cluster definition, run \"kbcli cd list\" to show all available cluster definitions")
   274  	cmd.Flags().StringVar(&o.ClusterVersionRef, "cluster-version", "", "Specify cluster version, run \"kbcli cv list\" to show all available cluster versions, use the latest version if not specified")
   275  	cmd.Flags().StringVarP(&o.SetFile, "set-file", "f", "", "Use yaml file, URL, or stdin to set the cluster resource")
    276  	cmd.Flags().StringArrayVar(&o.Values, "set", []string{}, "Set the cluster resource including cpu, memory, replicas and storage, each --set flag corresponds to a component (e.g. --set cpu=1,memory=1Gi,replicas=3,storage=20Gi or --set class=general-1c1g)")
    277  	cmd.Flags().StringArrayVar(&o.Storages, "pvc", []string{}, "Set the persistent volume claims of the cluster, each '--pvc' corresponds to a component and overrides the storage configuration given by --set (e.g. --pvc type=mysql,name=data,mode=ReadWriteOnce,size=20Gi --pvc type=mysql,name=log,mode=ReadWriteOnce,size=1Gi)")
    278  	cmd.Flags().StringArrayVar(&o.ServiceRef, "service-reference", []string{}, "Set the other KubeBlocks cluster dependencies, each '--service-reference' corresponds to a cluster service (e.g. --service-reference name=pulsarZookeeper,cluster=zookeeper,namespace=default)")
   279  
   280  	cmd.Flags().StringVar(&o.Backup, "backup", "", "Set a source backup to restore data")
   281  	cmd.Flags().StringVar(&o.RestoreTime, "restore-to-time", "", "Set a time for point in time recovery")
   282  	cmd.Flags().StringVar(&o.VolumeRestorePolicy, "volume-restore-policy", "Parallel", "the volume claim restore policy, supported values: [Serial, Parallel]")
   283  	cmd.Flags().BoolVar(&o.RBACEnabled, "rbac-enabled", false, "Specify whether rbac resources will be created by kbcli, otherwise KubeBlocks server will try to create rbac resources")
   284  	cmd.PersistentFlags().BoolVar(&o.EditBeforeCreate, "edit", o.EditBeforeCreate, "Edit the API resource before creating")
    285  	cmd.PersistentFlags().StringVar(&o.DryRun, "dry-run", "none", `Must be "client" or "server". With the client strategy, only print the object that would be sent, without sending it. With the server strategy, submit the server-side request, but the data is not persisted.`)
   286  	cmd.PersistentFlags().Lookup("dry-run").NoOptDefVal = "unchanged"
   287  
   288  	// add updatable flags
   289  	o.UpdatableFlags.addFlags(cmd)
   290  
   291  	// add print flags
   292  	printer.AddOutputFlagForCreate(cmd, &o.Format, true)
   293  
   294  	// register flag completion func
   295  	registerFlagCompletionFunc(cmd, f)
   296  
   297  	// add all subcommands for supported cluster type
   298  	cmd.AddCommand(buildCreateSubCmds(&o.CreateOptions)...)
   299  
   300  	o.Cmd = cmd
   301  
   302  	return cmd
   303  }
   304  
   305  func NewCreateOptions(f cmdutil.Factory, streams genericiooptions.IOStreams) *CreateOptions {
   306  	o := &CreateOptions{CreateOptions: create.CreateOptions{
   307  		Factory:         f,
   308  		IOStreams:       streams,
   309  		CueTemplateName: CueTemplateName,
   310  		GVR:             types.ClusterGVR(),
   311  	}}
   312  	o.CreateOptions.Options = o
   313  	o.CreateOptions.PreCreate = o.PreCreate
   314  	o.CreateOptions.CreateDependencies = o.CreateDependencies
   315  	o.CreateOptions.CleanUpFn = o.CleanUp
   316  	return o
   317  }
   318  
   319  func setMonitor(monitoringInterval uint8, components []map[string]interface{}) {
   320  	if len(components) == 0 {
   321  		return
   322  	}
   323  	for _, component := range components {
   324  		component[monitorKey] = monitoringInterval != 0
   325  	}
   326  }
   327  
   328  func getRestoreFromBackupAnnotation(backup *dpv1alpha1.Backup, volumeRestorePolicy string, compSpecsCount int, firstCompName string, restoreTime string) (string, error) {
   329  	componentName := backup.Labels[constant.KBAppComponentLabelKey]
   330  	if len(componentName) == 0 {
   331  		if compSpecsCount != 1 {
   332  			return "", fmt.Errorf("unable to obtain the name of the component to be recovered, please ensure that Backup.status.componentName exists")
   333  		}
   334  		componentName = firstCompName
   335  	}
   336  	backupNameString := fmt.Sprintf(`"%s":"%s"`, constant.BackupNameKeyForRestore, backup.Name)
   337  	backupNamespaceString := fmt.Sprintf(`"%s":"%s"`, constant.BackupNamespaceKeyForRestore, backup.Namespace)
   338  	volumeRestorePolicyString := fmt.Sprintf(`"%s":"%s"`, constant.VolumeRestorePolicyKeyForRestore, volumeRestorePolicy)
   339  	var restoreTimeString string
   340  	if restoreTime != "" {
   341  		restoreTimeString = fmt.Sprintf(`,"%s":"%s"`, constant.RestoreTimeKeyForRestore, restoreTime)
   342  	}
   343  
   344  	var passwordString string
   345  	connectionPassword := backup.Annotations[dptypes.ConnectionPasswordKey]
   346  	if connectionPassword != "" {
   347  		passwordString = fmt.Sprintf(`,"%s":"%s"`, constant.ConnectionPassword, connectionPassword)
   348  	}
   349  
   350  	restoreFromBackupAnnotation := fmt.Sprintf(`{"%s":{%s,%s,%s%s%s}}`, componentName, backupNameString, backupNamespaceString, volumeRestorePolicyString, restoreTimeString, passwordString)
   351  	return restoreFromBackupAnnotation, nil
   352  }
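
         // For example (illustrative, with placeholder key names), restoring component "mysql" from
         // backup "bk" in namespace "default" with the Parallel policy yields a value shaped like
         //	{"mysql":{"<backup-name-key>":"bk","<backup-namespace-key>":"default","<volume-restore-policy-key>":"Parallel"}}
         // where the real JSON keys are the constant.*KeyForRestore values used above, plus optional
         // restore-time and connection-password entries when those are provided.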
   353  
   354  func getSourceClusterFromBackup(backup *dpv1alpha1.Backup) (*appsv1alpha1.Cluster, error) {
   355  	sourceCluster := &appsv1alpha1.Cluster{}
   356  	sourceClusterJSON := backup.Annotations[constant.ClusterSnapshotAnnotationKey]
   357  	if err := json.Unmarshal([]byte(sourceClusterJSON), sourceCluster); err != nil {
   358  		return nil, err
   359  	}
   360  
   361  	return sourceCluster, nil
   362  }
   363  
   364  func getBackupObjectFromRestoreArgs(o *CreateOptions, backup *dpv1alpha1.Backup) error {
   365  	if o.Backup == "" {
   366  		return nil
   367  	}
   368  	if err := cluster.GetK8SClientObject(o.Dynamic, backup, types.BackupGVR(), o.Namespace, o.Backup); err != nil {
   369  		return err
   370  	}
   371  	return nil
   372  }
   373  
   374  func fillClusterInfoFromBackup(o *CreateOptions, cls **appsv1alpha1.Cluster) error {
   375  	if o.Backup == "" {
   376  		return nil
   377  	}
   378  	backup := &dpv1alpha1.Backup{}
   379  	if err := getBackupObjectFromRestoreArgs(o, backup); err != nil {
   380  		return err
   381  	}
   382  	backupCluster, err := getSourceClusterFromBackup(backup)
   383  	if err != nil {
   384  		return err
   385  	}
   386  	curCluster := *cls
   387  	if curCluster == nil {
   388  		curCluster = backupCluster
   389  	}
   390  
   391  	// validate cluster spec
   392  	if o.ClusterDefRef != "" && o.ClusterDefRef != backupCluster.Spec.ClusterDefRef {
    393  		return fmt.Errorf("specified cluster definition does not match the one from the backup (expect: %s, actual: %s),"+
    394  			" please check", backupCluster.Spec.ClusterDefRef, o.ClusterDefRef)
   395  	}
   396  	if o.ClusterVersionRef != "" && o.ClusterVersionRef != backupCluster.Spec.ClusterVersionRef {
    397  		return fmt.Errorf("specified cluster version does not match the one from the backup (expect: %s, actual: %s),"+
    398  			" please check", backupCluster.Spec.ClusterVersionRef, o.ClusterVersionRef)
   399  	}
   400  
   401  	o.ClusterDefRef = curCluster.Spec.ClusterDefRef
   402  	o.ClusterVersionRef = curCluster.Spec.ClusterVersionRef
   403  
   404  	*cls = curCluster
   405  	return nil
   406  }
   407  
   408  func formatRestoreTimeAndValidate(restoreTimeStr string, continuousBackup *dpv1alpha1.Backup) (string, error) {
   409  	if restoreTimeStr == "" {
   410  		return restoreTimeStr, nil
   411  	}
   412  	restoreTime, err := util.TimeParse(restoreTimeStr, time.Second)
   413  	if err != nil {
   414  		// retry to parse time with RFC3339 format.
   415  		var errRFC error
   416  		restoreTime, errRFC = time.Parse(time.RFC3339, restoreTimeStr)
   417  		if errRFC != nil {
   418  			// if retry failure, report the error
   419  			return restoreTimeStr, err
   420  		}
   421  	}
   422  	restoreTimeStr = restoreTime.Format(time.RFC3339)
   423  	// TODO: check with Recoverable time
   424  	if !isTimeInRange(restoreTime, continuousBackup.Status.TimeRange.Start.Time, continuousBackup.Status.TimeRange.End.Time) {
   425  		return restoreTimeStr, fmt.Errorf("restore-to-time is out of time range, you can view the recoverable time: \n"+
   426  			"\tkbcli cluster describe %s -n %s", continuousBackup.Labels[constant.AppInstanceLabelKey], continuousBackup.Namespace)
   427  	}
   428  	return restoreTimeStr, nil
   429  }
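
         // Both of the following forms are accepted for --restore-to-time (illustrative values):
         //	--restore-to-time "Jun 16,2023 18:58:53 UTC+0800"   // parsed by util.TimeParse
         //	--restore-to-time "2023-06-16T18:58:53+08:00"       // RFC3339 fallback
         // The value is normalized to RFC3339 before being validated against the backup's time range.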
   430  
   431  func setBackup(o *CreateOptions, components []map[string]interface{}) error {
   432  	backupName := o.Backup
   433  	if len(backupName) == 0 || len(components) == 0 {
   434  		return nil
   435  	}
   436  	backup := &dpv1alpha1.Backup{}
   437  	if err := cluster.GetK8SClientObject(o.Dynamic, backup, types.BackupGVR(), o.Namespace, backupName); err != nil {
   438  		return err
   439  	}
   440  	if backup.Status.Phase != dpv1alpha1.BackupPhaseCompleted {
   441  		return fmt.Errorf(`backup "%s" is not completed`, backup.Name)
   442  	}
   443  	restoreTimeStr, err := formatRestoreTimeAndValidate(o.RestoreTime, backup)
   444  	if err != nil {
   445  		return err
   446  	}
   447  	restoreAnnotation, err := getRestoreFromBackupAnnotation(backup, o.VolumeRestorePolicy, len(components), components[0]["name"].(string), restoreTimeStr)
   448  	if err != nil {
   449  		return err
   450  	}
   451  	if o.Annotations == nil {
   452  		o.Annotations = map[string]string{}
   453  	}
   454  	o.Annotations[constant.RestoreFromBackupAnnotationKey] = restoreAnnotation
   455  	return nil
   456  }
   457  
   458  func (o *CreateOptions) Validate() error {
   459  	if o.ClusterDefRef == "" {
   460  		return fmt.Errorf("a valid cluster definition is needed, use --cluster-definition to specify one, run \"kbcli clusterdefinition list\" to show all cluster definitions")
   461  	}
   462  
   463  	if o.TerminationPolicy == "" {
   464  		return fmt.Errorf("a valid termination policy is needed, use --termination-policy to specify one of: DoNotTerminate, Halt, Delete, WipeOut")
   465  	}
   466  
   467  	if err := o.validateClusterVersion(); err != nil {
   468  		return err
   469  	}
   470  
   471  	if len(o.Values) > 0 && len(o.SetFile) > 0 {
    472  		return fmt.Errorf("--set and --set-file cannot be specified at the same time")
   473  	}
   474  
   475  	matched, _ := regexp.MatchString(`^[a-z]([-a-z0-9]*[a-z0-9])?$`, o.Name)
   476  	if !matched {
   477  		return fmt.Errorf("cluster name must begin with a letter and can only contain lowercase letters, numbers, and '-'")
   478  	}
   479  
   480  	if len(o.Name) > 16 {
    481  		return fmt.Errorf("cluster name should not be longer than 16 characters")
   482  	}
   483  
   484  	return nil
   485  }
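
         // Examples of the naming rule enforced above (illustrative): "mycluster" and "my-cluster1"
         // are accepted, while "MyCluster", "1cluster" and "my_cluster" are rejected by the regexp,
         // and any name longer than 16 characters is rejected by the length check.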
   486  
   487  func (o *CreateOptions) Complete() error {
   488  	var (
   489  		compByte         []byte
   490  		cls              *appsv1alpha1.Cluster
   491  		clusterCompSpecs []appsv1alpha1.ClusterComponentSpec
   492  		err              error
   493  	)
   494  
   495  	if len(o.SetFile) > 0 {
   496  		if compByte, err = MultipleSourceComponents(o.SetFile, o.IOStreams.In); err != nil {
   497  			return err
   498  		}
   499  		if compByte, err = yaml.YAMLToJSON(compByte); err != nil {
   500  			return err
   501  		}
   502  
   503  		// compatible with old file format that only specifies the components
   504  		if err = json.Unmarshal(compByte, &cls); err != nil {
   505  			if clusterCompSpecs, err = parseClusterComponentSpec(compByte); err != nil {
   506  				return err
   507  			}
   508  		} else {
   509  			clusterCompSpecs = cls.Spec.ComponentSpecs
   510  		}
   511  	}
   512  	if err = fillClusterInfoFromBackup(o, &cls); err != nil {
   513  		return err
   514  	}
    515  	if cls != nil && cls.Spec.ComponentSpecs != nil {
   516  		clusterCompSpecs = cls.Spec.ComponentSpecs
   517  	}
   518  
   519  	// if name is not specified, generate a random cluster name
   520  	if o.Name == "" {
   521  		o.Name, err = generateClusterName(o.Dynamic, o.Namespace)
   522  		if err != nil {
   523  			return err
   524  		}
   525  	}
   526  
   527  	// build annotation
   528  	o.buildAnnotation(cls)
   529  
   530  	// build cluster definition
   531  	if err := o.buildClusterDef(cls); err != nil {
   532  		return err
   533  	}
   534  
   535  	// build cluster version
   536  	o.buildClusterVersion(cls)
   537  
   538  	// build backup config
   539  	if err := o.buildBackupConfig(cls); err != nil {
   540  		return err
   541  	}
   542  
   543  	// build components
   544  	components, err := o.buildComponents(clusterCompSpecs)
   545  	if err != nil {
   546  		return err
   547  	}
   548  
   549  	setMonitor(o.MonitoringInterval, components)
   550  	if err = setBackup(o, components); err != nil {
   551  		return err
   552  	}
   553  	o.ComponentSpecs = components
   554  
   555  	// TolerationsRaw looks like `["key=engineType,value=mongo,operator=Equal,effect=NoSchedule"]` after parsing by cmd
   556  	tolerations, err := util.BuildTolerations(o.TolerationsRaw)
   557  	if err != nil {
   558  		return err
   559  	}
   560  	if len(tolerations) > 0 {
   561  		o.Tolerations = tolerations
   562  	}
   563  
   564  	// validate default storageClassName
   565  	return validateStorageClass(o.Dynamic, o.ComponentSpecs)
   566  }
   567  
   568  func (o *CreateOptions) CleanUp() error {
   569  	if o.Client == nil {
   570  		return nil
   571  	}
   572  
   573  	return deleteDependencies(o.Client, o.Namespace, o.Name)
   574  }
   575  
   576  // buildComponents builds components from file or set values
   577  func (o *CreateOptions) buildComponents(clusterCompSpecs []appsv1alpha1.ClusterComponentSpec) ([]map[string]interface{}, error) {
   578  	var (
   579  		err       error
   580  		cd        *appsv1alpha1.ClusterDefinition
   581  		compSpecs []*appsv1alpha1.ClusterComponentSpec
   582  		storages  map[string][]map[storageKey]string
   583  	)
   584  
   585  	cd, err = cluster.GetClusterDefByName(o.Dynamic, o.ClusterDefRef)
   586  	if err != nil {
   587  		return nil, err
   588  	}
   589  	clsMgr, err := classutil.GetManager(o.Dynamic, o.ClusterDefRef)
   590  	if err != nil {
   591  		return nil, err
   592  	}
   593  
   594  	compSets, err := buildCompSetsMap(o.Values, cd)
   595  	if err != nil {
   596  		return nil, err
   597  	}
   598  	if len(o.Storages) != 0 {
   599  		storages, err = buildCompStorages(o.Storages, cd)
   600  		if err != nil {
   601  			return nil, err
   602  		}
   603  	}
   604  
   605  	overrideComponentBySets := func(comp, setComp *appsv1alpha1.ClusterComponentSpec, setValues map[setKey]string) {
   606  		for k := range setValues {
   607  			switch k {
   608  			case keyReplicas:
   609  				comp.Replicas = setComp.Replicas
   610  			case keyCPU:
   611  				comp.Resources.Requests[corev1.ResourceCPU] = setComp.Resources.Requests[corev1.ResourceCPU]
   612  				comp.Resources.Limits[corev1.ResourceCPU] = setComp.Resources.Limits[corev1.ResourceCPU]
   613  			case keyClass:
   614  				comp.ClassDefRef = setComp.ClassDefRef
   615  			case keyMemory:
   616  				comp.Resources.Requests[corev1.ResourceMemory] = setComp.Resources.Requests[corev1.ResourceMemory]
   617  				comp.Resources.Limits[corev1.ResourceMemory] = setComp.Resources.Limits[corev1.ResourceMemory]
   618  			case keyStorage:
   619  				if len(comp.VolumeClaimTemplates) > 0 && len(setComp.VolumeClaimTemplates) > 0 {
   620  					comp.VolumeClaimTemplates[0].Spec.Resources.Requests = setComp.VolumeClaimTemplates[0].Spec.Resources.Requests
   621  				}
   622  			case keyStorageClass:
   623  				if len(comp.VolumeClaimTemplates) > 0 && len(setComp.VolumeClaimTemplates) > 0 {
   624  					comp.VolumeClaimTemplates[0].Spec.StorageClassName = setComp.VolumeClaimTemplates[0].Spec.StorageClassName
   625  				}
   626  			case keySwitchPolicy:
   627  				comp.SwitchPolicy = setComp.SwitchPolicy
   628  			}
   629  		}
   630  	}
   631  
   632  	if clusterCompSpecs != nil {
   633  		setsCompSpecs, err := buildClusterComp(cd, compSets, clsMgr)
   634  		if err != nil {
   635  			return nil, err
   636  		}
   637  		setsCompSpecsMap := map[string]*appsv1alpha1.ClusterComponentSpec{}
   638  		for _, setComp := range setsCompSpecs {
   639  			setsCompSpecsMap[setComp.Name] = setComp
   640  		}
   641  		for index := range clusterCompSpecs {
   642  			comp := clusterCompSpecs[index]
   643  			overrideComponentBySets(&comp, setsCompSpecsMap[comp.Name], compSets[comp.Name])
   644  			compSpecs = append(compSpecs, &comp)
   645  		}
   646  	} else {
   647  		compSpecs, err = buildClusterComp(cd, compSets, clsMgr)
   648  		if err != nil {
   649  			return nil, err
   650  		}
   651  	}
   652  
   653  	if len(storages) != 0 {
   654  		compSpecs = rebuildCompStorage(storages, compSpecs)
   655  	}
   656  
   657  	// build service reference if --service-reference not empty
   658  	if len(o.ServiceRef) != 0 {
   659  		compSpecs, err = buildServiceRefs(o.ServiceRef, cd, compSpecs)
   660  		if err != nil {
   661  			return nil, err
   662  		}
   663  	}
   664  
   665  	var comps []map[string]interface{}
   666  	for _, compSpec := range compSpecs {
   667  		// validate component classes
   668  		if err = clsMgr.ValidateResources(o.ClusterDefRef, compSpec); err != nil {
   669  			return nil, err
   670  		}
   671  
   672  		// create component dependencies
   673  		if err = o.buildDependenciesFn(cd, compSpec); err != nil {
   674  			return nil, err
   675  		}
   676  
   677  		comp, err := runtime.DefaultUnstructuredConverter.ToUnstructured(compSpec)
   678  		if err != nil {
   679  			return nil, err
   680  		}
   681  		comps = append(comps, comp)
   682  	}
   683  	return comps, nil
   684  }
   685  
   686  const (
   687  	saNamePrefix             = "kb-"
   688  	roleNamePrefix           = "kb-"
   689  	roleBindingNamePrefix    = "kb-"
   690  	clusterRolePrefix        = "kb-"
   691  	clusterRoleBindingPrefix = "kb-"
   692  )
   693  
   694  var (
   695  	rbacAPIGroup    = "rbac.authorization.k8s.io"
   696  	saKind          = "ServiceAccount"
   697  	roleKind        = "Role"
   698  	clusterRoleKind = "ClusterRole"
   699  )
   700  
    701  // buildDependenciesFn sets up the dependencies for a component, e.g. postgresql depends on
    702  // a service account, a role and a rolebinding
   703  func (o *CreateOptions) buildDependenciesFn(cd *appsv1alpha1.ClusterDefinition,
   704  	compSpec *appsv1alpha1.ClusterComponentSpec) error {
   705  	// set component service account name
   706  	compSpec.ServiceAccountName = saNamePrefix + o.Name
   707  	return nil
   708  }
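
         // For a cluster named "mycluster" (illustrative), the service account name set here is
         // "kb-mycluster", matching the objects created by CreateDependencies below when
         // --rbac-enabled is specified.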
   709  
   710  func (o *CreateOptions) CreateDependencies(dryRun []string) error {
   711  	if !o.RBACEnabled {
   712  		return nil
   713  	}
   714  
   715  	var (
   716  		ctx          = context.TODO()
   717  		labels       = buildResourceLabels(o.Name)
   718  		applyOptions = metav1.ApplyOptions{FieldManager: "kbcli", DryRun: dryRun}
   719  	)
   720  
   721  	klog.V(1).Infof("create dependencies for cluster %s", o.Name)
   722  
   723  	if err := o.createServiceAccount(ctx, labels, applyOptions); err != nil {
   724  		return err
   725  	}
   726  	if err := o.createRoleAndBinding(ctx, labels, applyOptions); err != nil {
   727  		return err
   728  	}
   729  	if err := o.createClusterRoleAndBinding(ctx, labels, applyOptions); err != nil {
   730  		return err
   731  	}
   732  	return nil
   733  }
   734  
   735  func (o *CreateOptions) createServiceAccount(ctx context.Context, labels map[string]string, opts metav1.ApplyOptions) error {
   736  	saName := saNamePrefix + o.Name
   737  	klog.V(1).Infof("create service account %s", saName)
   738  	sa := corev1ac.ServiceAccount(saName, o.Namespace).WithLabels(labels)
   739  	_, err := o.Client.CoreV1().ServiceAccounts(o.Namespace).Apply(ctx, sa, opts)
   740  	return err
   741  }
   742  
   743  func (o *CreateOptions) createRoleAndBinding(ctx context.Context, labels map[string]string, opts metav1.ApplyOptions) error {
   744  	var (
   745  		saName          = saNamePrefix + o.Name
   746  		roleName        = roleNamePrefix + o.Name
   747  		roleBindingName = roleBindingNamePrefix + o.Name
   748  	)
   749  
   750  	klog.V(1).Infof("create role %s", roleName)
   751  	role := rbacv1ac.Role(roleName, o.Namespace).WithRules([]*rbacv1ac.PolicyRuleApplyConfiguration{
   752  		{
   753  			APIGroups: []string{""},
   754  			Resources: []string{"events"},
   755  			Verbs:     []string{"create"},
   756  		},
   757  		{
   758  			APIGroups: []string{"dataprotection.kubeblocks.io"},
   759  			Resources: []string{"backups/status"},
   760  			Verbs:     []string{"get", "update", "patch"},
   761  		},
   762  		{
   763  			APIGroups: []string{"dataprotection.kubeblocks.io"},
   764  			Resources: []string{"backups"},
   765  			Verbs:     []string{"create", "get", "list", "update", "patch"},
   766  		},
   767  	}...).WithLabels(labels)
   768  
    769  	// postgresql needs more rules for patroni
   770  	if ok, err := o.isPostgresqlCluster(); err != nil {
   771  		return err
   772  	} else if ok {
   773  		rules := []rbacv1ac.PolicyRuleApplyConfiguration{
   774  			{
   775  				APIGroups: []string{""},
   776  				Resources: []string{"configmaps"},
   777  				Verbs:     []string{"create", "get", "list", "patch", "update", "watch", "delete"},
   778  			},
   779  			{
   780  				APIGroups: []string{""},
   781  				Resources: []string{"endpoints"},
   782  				Verbs:     []string{"create", "get", "list", "patch", "update", "watch", "delete"},
   783  			},
   784  			{
   785  				APIGroups: []string{""},
   786  				Resources: []string{"pods"},
   787  				Verbs:     []string{"get", "list", "patch", "update", "watch"},
   788  			},
   789  		}
   790  		role.Rules = append(role.Rules, rules...)
   791  	}
   792  	if _, err := o.Client.RbacV1().Roles(o.Namespace).Apply(ctx, role, opts); err != nil {
   793  		return err
   794  	}
   795  
   796  	klog.V(1).Infof("create role binding %s", roleBindingName)
   797  	roleBinding := rbacv1ac.RoleBinding(roleBindingName, o.Namespace).WithLabels(labels).
   798  		WithSubjects([]*rbacv1ac.SubjectApplyConfiguration{
   799  			{
   800  				Kind:      &saKind,
   801  				Name:      &saName,
   802  				Namespace: &o.Namespace,
   803  			},
   804  		}...).
   805  		WithRoleRef(&rbacv1ac.RoleRefApplyConfiguration{
   806  			APIGroup: &rbacAPIGroup,
   807  			Kind:     &roleKind,
   808  			Name:     &roleName,
   809  		})
   810  	_, err := o.Client.RbacV1().RoleBindings(o.Namespace).Apply(ctx, roleBinding, opts)
   811  	return err
   812  }
   813  
   814  func (o *CreateOptions) createClusterRoleAndBinding(ctx context.Context, labels map[string]string, opts metav1.ApplyOptions) error {
   815  	var (
   816  		saName                 = saNamePrefix + o.Name
   817  		clusterRoleName        = clusterRolePrefix + o.Name
   818  		clusterRoleBindingName = clusterRoleBindingPrefix + o.Name
   819  	)
   820  
   821  	klog.V(1).Infof("create cluster role %s", clusterRoleName)
   822  	clusterRole := rbacv1ac.ClusterRole(clusterRoleName).WithRules([]*rbacv1ac.PolicyRuleApplyConfiguration{
   823  		{
   824  			APIGroups: []string{""},
   825  			Resources: []string{"nodes", "nodes/stats"},
   826  			Verbs:     []string{"get", "list"},
   827  		},
   828  	}...).WithLabels(labels)
   829  	if _, err := o.Client.RbacV1().ClusterRoles().Apply(ctx, clusterRole, opts); err != nil {
   830  		return err
   831  	}
   832  
   833  	klog.V(1).Infof("create cluster role binding %s", clusterRoleBindingName)
   834  	clusterRoleBinding := rbacv1ac.ClusterRoleBinding(clusterRoleBindingName).WithLabels(labels).
   835  		WithSubjects([]*rbacv1ac.SubjectApplyConfiguration{
   836  			{
   837  				Kind:      &saKind,
   838  				Name:      &saName,
   839  				Namespace: &o.Namespace,
   840  			},
   841  		}...).
   842  		WithRoleRef(&rbacv1ac.RoleRefApplyConfiguration{
   843  			APIGroup: &rbacAPIGroup,
   844  			Kind:     &clusterRoleKind,
   845  			Name:     &clusterRoleName,
   846  		})
   847  	_, err := o.Client.RbacV1().ClusterRoleBindings().Apply(ctx, clusterRoleBinding, opts)
   848  	return err
   849  }
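
         // Taken together, with --rbac-enabled and a cluster named "mycluster" (illustrative),
         // CreateDependencies applies: ServiceAccount "kb-mycluster", Role/RoleBinding "kb-mycluster"
         // in the cluster namespace, and ClusterRole/ClusterRoleBinding "kb-mycluster", all labeled
         // via buildResourceLabels and applied with field manager "kbcli".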
   850  
    851  // MultipleSourceComponents gets component data from multiple sources, such as stdin, a URI or a local file
   852  func MultipleSourceComponents(fileName string, in io.Reader) ([]byte, error) {
   853  	var data io.Reader
   854  	switch {
   855  	case fileName == "-":
   856  		data = in
    857  	case strings.HasPrefix(fileName, "http://") || strings.HasPrefix(fileName, "https://"):
   858  		resp, err := http.Get(fileName)
   859  		if err != nil {
   860  			return nil, err
   861  		}
   862  		defer resp.Body.Close()
   863  		data = resp.Body
   864  	default:
   865  		f, err := os.Open(fileName)
   866  		if err != nil {
   867  			return nil, err
   868  		}
   869  		defer f.Close()
   870  		data = f
   871  	}
   872  	return io.ReadAll(data)
   873  }
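
         // The sketch below is illustrative only and is not used by the command itself: it shows how
         // the --set-file handling in Complete combines MultipleSourceComponents with yaml.YAMLToJSON
         // and parseClusterComponentSpec to load component specs from stdin, a URL or a local file.
         // The function name exampleLoadCompSpecs is hypothetical.
         func exampleLoadCompSpecs(setFile string, in io.Reader) ([]appsv1alpha1.ClusterComponentSpec, error) {
         	// read raw bytes from "-" (stdin), an http(s) URL, or a local path
         	raw, err := MultipleSourceComponents(setFile, in)
         	if err != nil {
         		return nil, err
         	}
         	// the input may be YAML; convert it to JSON before unmarshalling
         	jsonBytes, err := yaml.YAMLToJSON(raw)
         	if err != nil {
         		return nil, err
         	}
         	// parse the old-style format that lists only the component specs
         	return parseClusterComponentSpec(jsonBytes)
         }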
   874  
   875  func registerFlagCompletionFunc(cmd *cobra.Command, f cmdutil.Factory) {
   876  	util.CheckErr(cmd.RegisterFlagCompletionFunc(
   877  		"cluster-definition",
   878  		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
   879  			return utilcomp.CompGetResource(f, util.GVRToString(types.ClusterDefGVR()), toComplete), cobra.ShellCompDirectiveNoFileComp
   880  		}))
   881  	util.CheckErr(cmd.RegisterFlagCompletionFunc(
   882  		"cluster-version",
   883  		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
   884  			var clusterVersion []string
   885  			clusterDefinition, err := cmd.Flags().GetString("cluster-definition")
   886  			if clusterDefinition == "" || err != nil {
   887  				clusterVersion = utilcomp.CompGetResource(f, util.GVRToString(types.ClusterVersionGVR()), toComplete)
   888  			} else {
   889  				label := fmt.Sprintf("%s=%s", constant.ClusterDefLabelKey, clusterDefinition)
   890  				clusterVersion = util.CompGetResourceWithLabels(f, cmd, util.GVRToString(types.ClusterVersionGVR()), []string{label}, toComplete)
   891  			}
   892  			return clusterVersion, cobra.ShellCompDirectiveNoFileComp
   893  		}))
   894  
   895  	var formatsWithDesc = map[string]string{
   896  		"JSON": "Output result in JSON format",
   897  		"YAML": "Output result in YAML format",
   898  	}
   899  	util.CheckErr(cmd.RegisterFlagCompletionFunc("output",
   900  		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
   901  			var names []string
   902  			for format, desc := range formatsWithDesc {
   903  				if strings.HasPrefix(format, toComplete) {
   904  					names = append(names, fmt.Sprintf("%s\t%s", format, desc))
   905  				}
   906  			}
   907  			return names, cobra.ShellCompDirectiveNoFileComp
   908  		}))
   909  }
   910  
    911  // PreCreate makes changes to the Unstructured object before saving the YAML to K8s
   912  func (o *CreateOptions) PreCreate(obj *unstructured.Unstructured) error {
   913  	c := &appsv1alpha1.Cluster{}
   914  	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, c); err != nil {
   915  		return err
   916  	}
   917  	// get cluster definition from k8s
   918  	cd, err := cluster.GetClusterDefByName(o.Dynamic, c.Spec.ClusterDefRef)
   919  	if err != nil {
   920  		return err
   921  	}
   922  
   923  	if !o.EnableAllLogs {
   924  		setEnableAllLogs(c, cd)
   925  	}
   926  	if o.BackupConfig == nil {
   927  		// if backup config is not specified, set cluster's backup to nil
   928  		c.Spec.Backup = nil
   929  	}
   930  	data, e := runtime.DefaultUnstructuredConverter.ToUnstructured(c)
   931  	if e != nil {
   932  		return e
   933  	}
   934  	obj.SetUnstructuredContent(data)
   935  	return nil
   936  }
   937  
   938  func (o *CreateOptions) isPostgresqlCluster() (bool, error) {
   939  	cd, err := cluster.GetClusterDefByName(o.Dynamic, o.ClusterDefRef)
   940  	if err != nil {
   941  		return false, err
   942  	}
   943  
   944  	var compDef *appsv1alpha1.ClusterComponentDefinition
   945  	if cd.Spec.Type != "postgresql" {
   946  		return false, nil
   947  	}
   948  
   949  	// get cluster component definition
   950  	if len(o.ComponentSpecs) == 0 {
    951  		return false, fmt.Errorf("no cluster component found")
   952  	}
   953  	compSpec := o.ComponentSpecs[0]
   954  	for i, def := range cd.Spec.ComponentDefs {
   955  		compDefRef := compSpec["componentDefRef"]
   956  		if compDefRef != nil && def.Name == compDefRef.(string) {
   957  			compDef = &cd.Spec.ComponentDefs[i]
   958  		}
   959  	}
   960  
   961  	if compDef == nil {
    962  		return false, fmt.Errorf("failed to find component definition for component %v", compSpec["name"])
   963  	}
   964  
   965  	// for postgresql, we need to create a service account, a role and a rolebinding
   966  	if compDef.CharacterType != "postgresql" {
   967  		return false, nil
   968  	}
   969  	return true, nil
   970  }
   971  
    972  // setEnableAllLogs enables all logs and ignores the enabledLogs settings at the component level.
   973  func setEnableAllLogs(c *appsv1alpha1.Cluster, cd *appsv1alpha1.ClusterDefinition) {
   974  	for idx, comCluster := range c.Spec.ComponentSpecs {
   975  		for _, com := range cd.Spec.ComponentDefs {
   976  			if !strings.EqualFold(comCluster.ComponentDefRef, com.Name) {
   977  				continue
   978  			}
   979  			typeList := make([]string, 0, len(com.LogConfigs))
   980  			for _, logConf := range com.LogConfigs {
   981  				typeList = append(typeList, logConf.Name)
   982  			}
   983  			c.Spec.ComponentSpecs[idx].EnabledLogs = typeList
   984  		}
   985  	}
   986  }
   987  
   988  func buildClusterComp(cd *appsv1alpha1.ClusterDefinition, setsMap map[string]map[setKey]string, clsMgr *class.Manager) ([]*appsv1alpha1.ClusterComponentSpec, error) {
    989  	// get value from the set values, falling back to built-in defaults for some workloads and
    990  	// finally to environment variables read through the viper config keys in setKeyCfg
   991  	getVal := func(c *appsv1alpha1.ClusterComponentDefinition, key setKey, sets map[setKey]string) string {
   992  		// get value from set values
   993  		if v := sets[key]; len(v) > 0 {
   994  			return v
   995  		}
   996  
    997  		// HACK: if the user does not set a value by command flag, set replicas to 2 for the
    998  		// replicationSet workload, and for redis sentinel set replicas to 3 and cpu and memory
    999  		// to 200m and 200Mi
  1000  		// TODO: use more graceful way to set default value
  1001  		if c.WorkloadType == appsv1alpha1.Replication {
  1002  			if key == keyReplicas {
  1003  				return "2"
  1004  			}
  1005  		}
  1006  
  1007  		// the default replicas is 3 if not set by command flag, for Consensus workload
  1008  		if c.WorkloadType == appsv1alpha1.Consensus {
  1009  			if key == keyReplicas {
  1010  				return "3"
  1011  			}
  1012  		}
  1013  
  1014  		if c.CharacterType == "redis" && c.Name == "redis-sentinel" {
  1015  			switch key {
  1016  			case keyReplicas:
  1017  				return "3"
  1018  			case keyCPU:
  1019  				return "200m"
  1020  			case keyMemory:
  1021  				return "200Mi"
  1022  			}
  1023  		}
  1024  
  1025  		// get value from environment variables
  1026  		cfg := setKeyCfg[key]
  1027  		return viper.GetString(cfg)
  1028  	}
  1029  
  1030  	buildSwitchPolicy := func(c *appsv1alpha1.ClusterComponentDefinition, compObj *appsv1alpha1.ClusterComponentSpec, sets map[setKey]string) error {
  1031  		if c.WorkloadType != appsv1alpha1.Replication {
  1032  			return nil
  1033  		}
  1034  		var switchPolicyType appsv1alpha1.SwitchPolicyType
  1035  		switch getVal(c, keySwitchPolicy, sets) {
  1036  		case "Noop", "":
  1037  			switchPolicyType = appsv1alpha1.Noop
  1038  		case "MaximumAvailability":
  1039  			switchPolicyType = appsv1alpha1.MaximumAvailability
  1040  		case "MaximumPerformance":
  1041  			switchPolicyType = appsv1alpha1.MaximumDataProtection
  1042  		default:
   1043  			return fmt.Errorf("switchPolicy is illegal, only supports Noop, MaximumAvailability and MaximumPerformance")
  1044  		}
  1045  		compObj.SwitchPolicy = &appsv1alpha1.ClusterSwitchPolicy{
  1046  			Type: switchPolicyType,
  1047  		}
  1048  		return nil
  1049  	}
  1050  
  1051  	var comps []*appsv1alpha1.ClusterComponentSpec
  1052  	for i, c := range cd.Spec.ComponentDefs {
  1053  		sets := setsMap[c.Name]
  1054  
  1055  		// HACK: for apecloud-mysql cluster definition, if setsMap is empty, user
  1056  		// does not specify any set, so we only build the first component.
  1057  		// TODO(ldm): remove this hack and use helm chart to render the cluster.
  1058  		if i > 0 && len(sets) == 0 && cd.Name == apeCloudMysql {
  1059  			continue
  1060  		}
  1061  
  1062  		// get replicas
  1063  		setReplicas, err := strconv.Atoi(getVal(&c, keyReplicas, sets))
  1064  		if err != nil {
   1065  			return nil, fmt.Errorf("replicas is illegal: %v", err)
  1066  		}
  1067  		if setReplicas < 0 {
   1068  			return nil, fmt.Errorf("replicas is illegal, required value >= 0")
  1069  		}
  1070  		if setReplicas > math.MaxInt32 {
   1071  			return nil, fmt.Errorf("replicas is illegal, exceeds max value (%d)", math.MaxInt32)
  1072  		}
  1073  		replicas := int32(setReplicas)
  1074  
  1075  		compObj := &appsv1alpha1.ClusterComponentSpec{
  1076  			Name:            c.Name,
  1077  			ComponentDefRef: c.Name,
  1078  			Replicas:        replicas,
  1079  		}
  1080  
  1081  		// class has higher priority than other resource related parameters
  1082  		resourceList := make(corev1.ResourceList)
  1083  		if clsMgr.HasClass(compObj.ComponentDefRef, class.Any) {
  1084  			if className := getVal(&c, keyClass, sets); className != "" {
  1085  				clsDefRef := appsv1alpha1.ClassDefRef{}
  1086  				parts := strings.SplitN(className, ":", 2)
  1087  				if len(parts) == 1 {
  1088  					clsDefRef.Class = parts[0]
  1089  				} else {
  1090  					clsDefRef.Name = parts[0]
  1091  					clsDefRef.Class = parts[1]
  1092  				}
  1093  				compObj.ClassDefRef = &clsDefRef
  1094  			} else {
  1095  				resourceList = corev1.ResourceList{
  1096  					corev1.ResourceCPU:    resource.MustParse(getVal(&c, keyCPU, sets)),
  1097  					corev1.ResourceMemory: resource.MustParse(getVal(&c, keyMemory, sets)),
  1098  				}
  1099  			}
  1100  		} else {
  1101  			if className := getVal(&c, keyClass, sets); className != "" {
  1102  				return nil, fmt.Errorf("can not find class %s for component type %s", className, c.Name)
  1103  			}
  1104  			resourceList = corev1.ResourceList{
  1105  				corev1.ResourceCPU:    resource.MustParse(getVal(&c, keyCPU, sets)),
  1106  				corev1.ResourceMemory: resource.MustParse(getVal(&c, keyMemory, sets)),
  1107  			}
  1108  		}
  1109  		compObj.Resources = corev1.ResourceRequirements{
  1110  			Requests: resourceList,
  1111  			Limits:   resourceList,
  1112  		}
  1113  		compObj.VolumeClaimTemplates = []appsv1alpha1.ClusterComponentVolumeClaimTemplate{{
  1114  			Name: "data",
  1115  			Spec: appsv1alpha1.PersistentVolumeClaimSpec{
  1116  				AccessModes: []corev1.PersistentVolumeAccessMode{
  1117  					corev1.ReadWriteOnce,
  1118  				},
  1119  				Resources: corev1.ResourceRequirements{
  1120  					Requests: corev1.ResourceList{
  1121  						corev1.ResourceStorage: resource.MustParse(getVal(&c, keyStorage, sets)),
  1122  					},
  1123  				},
  1124  			},
  1125  		}}
  1126  		storageClass := getVal(&c, keyStorageClass, sets)
  1127  		if len(storageClass) != 0 {
   1128  			// currently most clusterDefinition components have only one volumeClaimTemplate by default
  1129  			compObj.VolumeClaimTemplates[0].Spec.StorageClassName = &storageClass
  1130  		}
  1131  		if err = buildSwitchPolicy(&c, compObj, sets); err != nil {
  1132  			return nil, err
  1133  		}
  1134  		comps = append(comps, compObj)
  1135  	}
  1136  	return comps, nil
  1137  }
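
         // For example (illustrative), with the apecloud-mysql cluster definition and
         //	--set cpu=1,memory=1Gi,storage=20Gi,replicas=3
         // the resulting component spec has Replicas=3, identical requests and limits of
         // cpu=1 and memory=1Gi, and a single "data" volume claim template requesting 20Gi
         // (plus the storageClass, if one was set).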
  1138  
   1139  // buildCompSetsMap builds the map from component definition name to its set values; if the name is not
   1140  // specified in the set, the cluster definition's default component name is used.
  1141  func buildCompSetsMap(values []string, cd *appsv1alpha1.ClusterDefinition) (map[string]map[setKey]string, error) {
  1142  	allSets := map[string]map[setKey]string{}
  1143  	parseKey := func(key string) setKey {
  1144  		for _, k := range setKeys() {
  1145  			if strings.EqualFold(k, key) {
  1146  				return setKey(k)
  1147  			}
  1148  		}
  1149  		return keyUnknown
  1150  	}
  1151  	buildSetMap := func(sets []string) (map[setKey]string, error) {
  1152  		res := map[setKey]string{}
  1153  		for _, set := range sets {
  1154  			kv := strings.Split(set, "=")
  1155  			if len(kv) != 2 {
  1156  				return nil, fmt.Errorf("unknown set format \"%s\", should be like key1=value1", set)
  1157  			}
  1158  
  1159  			// only record the supported key
  1160  			k := parseKey(kv[0])
  1161  			if k == keyUnknown {
  1162  				return nil, fmt.Errorf("unknown set key \"%s\", should be one of [%s]", kv[0], strings.Join(setKeys(), ","))
  1163  			}
  1164  			res[k] = kv[1]
  1165  		}
  1166  		return res, nil
  1167  	}
  1168  
  1169  	// each value corresponds to a component
  1170  	for _, value := range values {
  1171  		sets, err := buildSetMap(strings.Split(value, ","))
  1172  		if err != nil {
  1173  			return nil, err
  1174  		}
  1175  		if len(sets) == 0 {
  1176  			continue
  1177  		}
  1178  
  1179  		// get the component definition name
  1180  		compDefName := sets[keyType]
  1181  
   1182  		// if type is not specified by the user, use the default component definition name; this
   1183  		// currently assumes a cluster definition with one main component
  1184  		if len(compDefName) == 0 {
  1185  			name, err := cluster.GetDefaultCompName(cd)
  1186  			if err != nil {
  1187  				return nil, err
  1188  			}
  1189  
   1190  			// if there is more than one component definition, default to the first one and log a message
  1191  			if len(cd.Spec.ComponentDefs) > 1 {
  1192  				klog.V(1).Infof("the component is not specified, use the default component \"%s\" in cluster definition \"%s\"", name, cd.Name)
  1193  			}
  1194  			compDefName = name
  1195  		} else {
  1196  			// check the type is a valid component definition name
  1197  			valid := false
  1198  			for _, c := range cd.Spec.ComponentDefs {
  1199  				if c.Name == compDefName {
  1200  					valid = true
  1201  					break
  1202  				}
  1203  			}
  1204  			if !valid {
  1205  				return nil, fmt.Errorf("the type \"%s\" is not a valid component definition name", compDefName)
  1206  			}
  1207  		}
  1208  
   1209  		// if already set by another value, later values override earlier ones
  1210  		if old, ok := allSets[compDefName]; ok {
  1211  			for k, v := range sets {
  1212  				old[k] = v
  1213  			}
  1214  			sets = old
  1215  		}
  1216  		allSets[compDefName] = sets
  1217  	}
  1218  	return allSets, nil
  1219  }
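
         // For example (illustrative), the flags
         //	--set type=redis,cpu=1 --set type=redis-sentinel,cpu=200m
         // produce a map like {"redis": {cpu: "1"}, "redis-sentinel": {cpu: "200m"}}, while a
         // --set value without type= is attributed to the cluster definition's default component.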
  1220  
  1221  // generateClusterName generates a random cluster name that does not exist
  1222  func generateClusterName(dynamic dynamic.Interface, namespace string) (string, error) {
  1223  	var name string
  1224  	// retry 10 times
  1225  	for i := 0; i < 10; i++ {
  1226  		name = cluster.GenerateName()
  1227  		// check whether the cluster exists, if not found, return it
  1228  		_, err := dynamic.Resource(types.ClusterGVR()).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{})
  1229  		if errors.IsNotFound(err) {
  1230  			return name, nil
  1231  		}
  1232  		if err != nil {
  1233  			return "", err
  1234  		}
  1235  	}
  1236  	return "", fmt.Errorf("failed to generate cluster name")
  1237  }
  1238  
  1239  func (f *UpdatableFlags) addFlags(cmd *cobra.Command) {
  1240  	cmd.Flags().StringVar(&f.PodAntiAffinity, "pod-anti-affinity", "Preferred", "Pod anti-affinity type, one of: (Preferred, Required)")
   1241  	cmd.Flags().Uint8Var(&f.MonitoringInterval, "monitoring-interval", 0, "The monitoring interval of the cluster in seconds, 0 disables monitoring and any non-zero value enables it")
   1242  	cmd.Flags().BoolVar(&f.EnableAllLogs, "enable-all-logs", false, "Enable extraction of all application logs; when set to true, the enabledLogs settings at the component level are ignored, default is false")
  1243  	cmd.Flags().StringVar(&f.TerminationPolicy, "termination-policy", "Delete", "Termination policy, one of: (DoNotTerminate, Halt, Delete, WipeOut)")
  1244  	cmd.Flags().StringArrayVar(&f.TopologyKeys, "topology-keys", nil, "Topology keys for affinity")
  1245  	cmd.Flags().StringToStringVar(&f.NodeLabels, "node-labels", nil, "Node label selector")
  1246  	cmd.Flags().StringSliceVar(&f.TolerationsRaw, "tolerations", nil, `Tolerations for cluster, such as "key=value:effect, key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"'`)
  1247  	cmd.Flags().StringVar(&f.Tenancy, "tenancy", "SharedNode", "Tenancy options, one of: (SharedNode, DedicatedNode)")
   1248  	cmd.Flags().BoolVar(&f.BackupEnabled, "backup-enabled", false, "Specify whether to enable automated backup")
  1249  	cmd.Flags().StringVar(&f.BackupRetentionPeriod, "backup-retention-period", "1d", "a time string ending with the 'd'|'D'|'h'|'H' character to describe how long the Backup should be retained")
  1250  	cmd.Flags().StringVar(&f.BackupMethod, "backup-method", "", "the backup method, view it by \"kbcli cd describe <cluster-definition>\", if not specified, the default backup method will be to take snapshots of the volume")
  1251  	cmd.Flags().StringVar(&f.BackupCronExpression, "backup-cron-expression", "", "the cron expression for schedule, the timezone is in UTC. see https://en.wikipedia.org/wiki/Cron.")
  1252  	cmd.Flags().Int64Var(&f.BackupStartingDeadlineMinutes, "backup-starting-deadline-minutes", 0, "the deadline in minutes for starting the backup job if it misses its scheduled time for any reason")
  1253  	cmd.Flags().StringVar(&f.BackupRepoName, "backup-repo-name", "", "the backup repository name")
   1254  	cmd.Flags().BoolVar(&f.BackupPITREnabled, "pitr-enabled", false, "Specify whether to enable point-in-time recovery")
  1255  
  1256  	util.CheckErr(cmd.RegisterFlagCompletionFunc(
  1257  		"termination-policy",
  1258  		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
  1259  			return []string{
  1260  				"DoNotTerminate\tblock delete operation",
  1261  				"Halt\tdelete workload resources such as statefulsets and deployments, but keep PVCs",
  1262  				"Delete\tbased on Halt, and also deletes PVCs",
  1263  				"WipeOut\tbased on Delete, and also wipes out all volume snapshots and snapshot data from the backup storage location",
  1264  			}, cobra.ShellCompDirectiveNoFileComp
  1265  		}))
  1266  	util.CheckErr(cmd.RegisterFlagCompletionFunc(
  1267  		"pod-anti-affinity",
  1268  		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
  1269  			return []string{
  1270  				"Preferred\ttry to spread pods of the cluster by the specified topology-keys",
  1271  				"Required\tmust spread pods of the cluster by the specified topology-keys",
  1272  			}, cobra.ShellCompDirectiveNoFileComp
  1273  		}))
  1274  	util.CheckErr(cmd.RegisterFlagCompletionFunc(
  1275  		"tenancy",
  1276  		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
  1277  			return []string{
  1278  				"SharedNode\tpods of the cluster may share the same node",
  1279  				"DedicatedNode\teach pod of the cluster will run on its own dedicated node",
  1280  			}, cobra.ShellCompDirectiveNoFileComp
  1281  		}))
  1282  }
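
// The --tolerations flag above accepts entries such as "engineType=mongo:NoSchedule"
// or "diskType:NoSchedule". The sketch below shows how one such entry could be turned
// into a corev1.Toleration; it is an illustrative helper under those format assumptions,
// not the parser kbcli actually uses, and it is not called elsewhere in this file.
func parseTolerationSketch(raw string) corev1.Toleration {
	toleration := corev1.Toleration{Operator: corev1.TolerationOpExists}
	keyValue := raw
	// split off the optional ":effect" suffix
	if idx := strings.LastIndex(raw, ":"); idx >= 0 {
		toleration.Effect = corev1.TaintEffect(strings.TrimSpace(raw[idx+1:]))
		keyValue = raw[:idx]
	}
	// "key=value" maps to the Equal operator, a bare "key" to Exists
	if kv := strings.SplitN(keyValue, "=", 2); len(kv) == 2 {
		toleration.Key = strings.TrimSpace(kv[0])
		toleration.Value = strings.TrimSpace(kv[1])
		toleration.Operator = corev1.TolerationOpEqual
	} else {
		toleration.Key = strings.TrimSpace(keyValue)
	}
	return toleration
}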
  1283  
  1284  // validateStorageClass checks that the StorageClasses declared in volume claim templates exist;
  1285  // if none is declared, it checks that a default StorageClass exists
  1286  func validateStorageClass(dynamic dynamic.Interface, components []map[string]interface{}) error {
  1287  	existedStorageClasses, existedDefault, err := getStorageClasses(dynamic)
  1288  	if err != nil {
  1289  		return err
  1290  	}
  1291  	for _, comp := range components {
  1292  		compObj := appsv1alpha1.ClusterComponentSpec{}
  1293  		err = runtime.DefaultUnstructuredConverter.FromUnstructured(comp, &compObj)
  1294  		if err != nil {
  1295  			return err
  1296  		}
  1297  		for _, vct := range compObj.VolumeClaimTemplates {
  1298  			name := vct.Spec.StorageClassName
  1299  			if name != nil {
  1300  				// validate that the specified StorageClass exists
  1301  				if _, ok := existedStorageClasses[*name]; !ok {
  1302  					return fmt.Errorf("failed to find the specified storageClass \"%s\"", *name)
  1303  				}
  1304  			} else if !existedDefault {
  1305  				// validate the default StorageClass
  1306  				return fmt.Errorf("failed to find the default storageClass, use '--set storageClass=NAME' to set it")
  1307  			}
  1308  		}
  1309  	}
  1310  	return nil
  1311  }
  1312  
  1313  // getStorageClasses returns all StorageClasses in the Kubernetes cluster and whether a default StorageClass exists
  1314  func getStorageClasses(dynamic dynamic.Interface) (map[string]struct{}, bool, error) {
  1315  	gvr := types.StorageClassGVR()
  1316  	allStorageClasses := make(map[string]struct{})
  1317  	existedDefault := false
  1318  	list, err := dynamic.Resource(gvr).List(context.Background(), metav1.ListOptions{})
  1319  	if err != nil {
  1320  		return nil, false, err
  1321  	}
  1322  	for _, item := range list.Items {
  1323  		allStorageClasses[item.GetName()] = struct{}{}
  1324  		annotations := item.GetAnnotations()
  1325  		if !existedDefault && annotations != nil && (annotations[storage.IsDefaultStorageClassAnnotation] == annotationTrueValue || annotations[storage.BetaIsDefaultStorageClassAnnotation] == annotationTrueValue) {
  1326  			existedDefault = true
  1327  		}
  1328  	}
  1329  	// if no default StorageClass was found, fall back to checking the kubeblocks-manager-config (used on cloud Kubernetes)
  1330  	if existedDefault {
  1331  		return allStorageClasses, existedDefault, nil
  1332  	}
  1333  	existedDefault, err = validateDefaultSCInConfig(dynamic)
  1334  	return allStorageClasses, existedDefault, err
  1335  }
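
// isDefaultStorageClassSketch mirrors the annotation check in getStorageClasses above:
// a StorageClass is treated as the default when either the GA or the legacy beta
// "is-default-class" annotation is set to "true". It is an illustrative helper only
// and is not called elsewhere in this file.
func isDefaultStorageClassSketch(item *unstructured.Unstructured) bool {
	annotations := item.GetAnnotations()
	if annotations == nil {
		return false
	}
	return annotations[storage.IsDefaultStorageClassAnnotation] == annotationTrueValue ||
		annotations[storage.BetaIsDefaultStorageClassAnnotation] == annotationTrueValue
}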
  1336  
  1337  // validateClusterVersion checks that the declared cluster version exists;
  1338  // if it is not set, it checks that a default cluster version exists
  1339  func (o *CreateOptions) validateClusterVersion() error {
  1340  	var err error
  1341  
  1342  	// cluster version is specified, validate if exists
  1343  	if o.ClusterVersionRef != "" {
  1344  		if err = cluster.ValidateClusterVersion(o.Dynamic, o.ClusterDefRef, o.ClusterVersionRef); err != nil {
  1345  			return fmt.Errorf("cluster version \"%s\" does not exist, run following command to get the available cluster versions\n\tkbcli cv list --cluster-definition=%s",
  1346  				o.ClusterVersionRef, o.ClusterDefRef)
  1347  		}
  1348  		return nil
  1349  	}
  1350  
  1351  	// cluster version is not specified, get the default cluster version
  1352  	if o.ClusterVersionRef, err = cluster.GetDefaultVersion(o.Dynamic, o.ClusterDefRef); err != nil {
  1353  		return err
  1354  	}
  1355  
  1356  	dryRun, err := o.GetDryRunStrategy()
  1357  	if err != nil {
  1358  		return err
  1359  	}
  1360  	// if dryRun is set, run in quiet mode and avoid polluting the YAML output with the info message
  1361  	if dryRun != create.DryRunNone {
  1362  		return nil
  1363  	}
  1364  
  1365  	fmt.Fprintf(o.Out, "Info: --cluster-version is not specified, ClusterVersion %s is applied by default\n", o.ClusterVersionRef)
  1366  	return nil
  1367  }
  1368  
  1369  func buildResourceLabels(clusterName string) map[string]string {
  1370  	return map[string]string{
  1371  		constant.AppInstanceLabelKey:  clusterName,
  1372  		constant.AppManagedByLabelKey: "kbcli",
  1373  	}
  1374  }
  1375  
  1376  // build the cluster definition
  1377  // if the cluster definition is not specified, pick the cluster definition in the cluster component
  1378  // if neither of them is specified, return an error
  1379  func (o *CreateOptions) buildClusterDef(cls *appsv1alpha1.Cluster) error {
  1380  	if o.ClusterDefRef != "" {
  1381  		return nil
  1382  	}
  1383  
  1384  	if cls != nil && cls.Spec.ClusterDefRef != "" {
  1385  		o.ClusterDefRef = cls.Spec.ClusterDefRef
  1386  		return nil
  1387  	}
  1388  
  1389  	return fmt.Errorf("a valid cluster definition is needed, use --cluster-definition to specify one, run \"kbcli clusterdefinition list\" to show all cluster definitions")
  1390  }
  1391  
  1392  // build the cluster version
  1393  // if the cluster version is not specified, pick the cluster version in the cluster component
  1394  // if neither of them is specified, pick default cluster version
  1395  func (o *CreateOptions) buildClusterVersion(cls *appsv1alpha1.Cluster) {
  1396  	if o.ClusterVersionRef != "" {
  1397  		return
  1398  	}
  1399  
  1400  	if cls != nil && cls.Spec.ClusterVersionRef != "" {
  1401  		o.ClusterVersionRef = cls.Spec.ClusterVersionRef
  1402  	}
  1403  }
  1404  
  1405  func (o *CreateOptions) buildAnnotation(cls *appsv1alpha1.Cluster) {
  1406  	if cls == nil {
  1407  		return
  1408  	}
  1409  
  1410  	if o.Annotations == nil {
  1411  		o.Annotations = cls.Annotations
  1412  	}
  1413  }
  1414  
  1415  func (o *CreateOptions) buildBackupConfig(cls *appsv1alpha1.Cluster) error {
  1416  	// if cls.Spec.Backup isn't nil, use the backup config from the cluster
  1417  	if cls != nil && cls.Spec.Backup != nil {
  1418  		o.BackupConfig = cls.Spec.Backup
  1419  	}
  1420  
  1421  	// check whether the flag was set by the user
  1422  	var flags []*pflag.Flag
  1423  	if o.Cmd != nil {
  1424  		o.Cmd.Flags().Visit(func(flag *pflag.Flag) {
  1425  			// only check the backup flags
  1426  			if flag.Name == "backup-enabled" || flag.Name == "backup-retention-period" ||
  1427  				flag.Name == "backup-method" || flag.Name == "backup-cron-expression" ||
  1428  				flag.Name == "backup-starting-deadline-minutes" || flag.Name == "backup-repo-name" ||
  1429  				flag.Name == "pitr-enabled" {
  1430  				flags = append(flags, flag)
  1431  			}
  1432  		})
  1433  	}
  1434  
  1435  	// a backup method must be set when the backup config is set via CLI flags
  1436  	if len(flags) > 0 {
  1437  		if o.BackupConfig == nil {
  1438  			o.BackupConfig = &appsv1alpha1.ClusterBackup{}
  1439  		}
  1440  
  1441  		// get default backup method and all backup methods
  1442  		defaultBackupMethod, backupMethodsMap, err := getBackupMethodsFromBackupPolicyTemplates(o.Dynamic, o.ClusterDefRef)
  1443  		if err != nil {
  1444  			return err
  1445  		}
  1446  
  1447  		// if backup method is empty in backup config, use the default backup method
  1448  		if o.BackupConfig.Method == "" {
  1449  			o.BackupConfig.Method = defaultBackupMethod
  1450  		}
  1451  
  1452  		// if the flag is set by user, use the flag value
  1453  		for _, flag := range flags {
  1454  			switch flag.Name {
  1455  			case "backup-enabled":
  1456  				o.BackupConfig.Enabled = &o.BackupEnabled
  1457  			case "backup-retention-period":
  1458  				o.BackupConfig.RetentionPeriod = dpv1alpha1.RetentionPeriod(o.BackupRetentionPeriod)
  1459  			case "backup-method":
  1460  				if _, ok := backupMethodsMap[o.BackupMethod]; !ok {
  1461  					return fmt.Errorf("backup method %s is not supported, please view supported backup methods by \"kbcli cd describe %s\"", o.BackupMethod, o.ClusterDefRef)
  1462  				}
  1463  				o.BackupConfig.Method = o.BackupMethod
  1464  			case "backup-cron-expression":
  1465  				if _, err := cron.ParseStandard(o.BackupCronExpression); err != nil {
  1466  					return fmt.Errorf("invalid cron expression: %s, please see https://en.wikipedia.org/wiki/Cron", o.BackupCronExpression)
  1467  				}
  1468  				o.BackupConfig.CronExpression = o.BackupCronExpression
  1469  			case "backup-starting-deadline-minutes":
  1470  				o.BackupConfig.StartingDeadlineMinutes = &o.BackupStartingDeadlineMinutes
  1471  			case "backup-repo-name":
  1472  				o.BackupConfig.RepoName = o.BackupRepoName
  1473  			case "pitr-enabled":
  1474  				o.BackupConfig.PITREnabled = &o.BackupPITREnabled
  1475  			}
  1476  		}
  1477  	}
  1478  
  1479  	return nil
  1480  }
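
// The --backup-cron-expression value above is validated with robfig/cron's ParseStandard,
// which accepts the standard 5-field cron syntax, e.g. "0 3 * * *" for a daily backup at
// 03:00 UTC. The helper below is a minimal sketch of that check; the name is illustrative
// and it is not called elsewhere in this file.
func isValidBackupCronExpression(expr string) bool {
	_, err := cron.ParseStandard(expr)
	return err == nil
}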
  1481  
  1482  // getBackupMethodsFromBackupPolicyTemplates gets the default backup method and all backup methods from the backup policy template;
  1483  // if a method's snapshotVolumes is true, that method is used as the default
  1484  func getBackupMethodsFromBackupPolicyTemplates(dynamic dynamic.Interface, clusterDefRef string) (string, map[string]struct{}, error) {
  1485  	var backupPolicyTemplates []appsv1alpha1.BackupPolicyTemplate
  1486  	var defaultBackupPolicyTemplate appsv1alpha1.BackupPolicyTemplate
  1487  
  1488  	obj, err := dynamic.Resource(types.BackupPolicyTemplateGVR()).List(context.TODO(), metav1.ListOptions{
  1489  		LabelSelector: fmt.Sprintf("%s=%s", constant.ClusterDefLabelKey, clusterDefRef),
  1490  	})
  1491  	if err != nil {
  1492  		return "", nil, err
  1493  	}
  1494  	for _, item := range obj.Items {
  1495  		var backupPolicyTemplate appsv1alpha1.BackupPolicyTemplate
  1496  		err = runtime.DefaultUnstructuredConverter.FromUnstructured(item.Object, &backupPolicyTemplate)
  1497  		if err != nil {
  1498  			return "", nil, err
  1499  		}
  1500  		backupPolicyTemplates = append(backupPolicyTemplates, backupPolicyTemplate)
  1501  	}
  1502  
  1503  	if len(backupPolicyTemplates) == 0 {
  1504  		return "", nil, fmt.Errorf("failed to find backup policy template for cluster definition %s", clusterDefRef)
  1505  	}
  1506  	// if there is only one backup policy template, use it as default backup policy template
  1507  	if len(backupPolicyTemplates) == 1 {
  1508  		defaultBackupPolicyTemplate = backupPolicyTemplates[0]
  1509  	}
  1510  	for _, backupPolicyTemplate := range backupPolicyTemplates {
  1511  		if backupPolicyTemplate.Annotations[dptypes.DefaultBackupPolicyTemplateAnnotationKey] == annotationTrueValue {
  1512  			defaultBackupPolicyTemplate = backupPolicyTemplate
  1513  			break
  1514  		}
  1515  	}
  1516  
  1517  	var defaultBackupMethod string
  1518  	var backupMethodsMap = make(map[string]struct{})
  1519  	for _, policy := range defaultBackupPolicyTemplate.Spec.BackupPolicies {
  1520  		for _, method := range policy.BackupMethods {
  1521  			if boolptr.IsSetToTrue(method.SnapshotVolumes) {
  1522  				defaultBackupMethod = method.Name
  1523  			}
  1524  			backupMethodsMap[method.Name] = struct{}{}
  1525  		}
  1526  	}
  1527  	if defaultBackupMethod == "" {
  1528  		return "", nil, fmt.Errorf("failed to find a default backup method whose snapshotVolumes is true, please check the backup policy template for cluster definition %s", clusterDefRef)
  1529  	}
  1530  	return defaultBackupMethod, backupMethodsMap, nil
  1531  }
  1532  
  1533  // parse the cluster component spec
  1534  // compatible with old file format that only specifies the components
  1535  func parseClusterComponentSpec(compByte []byte) ([]appsv1alpha1.ClusterComponentSpec, error) {
  1536  	var compSpecs []appsv1alpha1.ClusterComponentSpec
  1537  	var comps []map[string]interface{}
  1538  	if err := json.Unmarshal(compByte, &comps); err != nil {
  1539  		return nil, err
  1540  	}
  1541  	for _, comp := range comps {
  1542  		var compSpec appsv1alpha1.ClusterComponentSpec
  1543  		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(comp, &compSpec); err != nil {
  1544  			return nil, err
  1545  		}
  1546  		compSpecs = append(compSpecs, compSpec)
  1547  	}
  1548  
  1549  	return compSpecs, nil
  1550  }
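
// exampleParseOldComponentFormat sketches the old file format accepted by
// parseClusterComponentSpec: a bare JSON array of component objects. The field names
// are assumed to follow the ClusterComponentSpec JSON tags and the values are made up;
// this helper is illustrative only and is not called elsewhere in this file.
func exampleParseOldComponentFormat() ([]appsv1alpha1.ClusterComponentSpec, error) {
	oldFormat := []byte(`[{"name": "mysql", "componentDefRef": "mysql"}]`)
	return parseClusterComponentSpec(oldFormat)
}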
  1551  
  1552  func setKeys() []string {
  1553  	return []string{
  1554  		string(keyCPU),
  1555  		string(keyType),
  1556  		string(keyStorage),
  1557  		string(keyMemory),
  1558  		string(keyReplicas),
  1559  		string(keyClass),
  1560  		string(keyStorageClass),
  1561  		string(keySwitchPolicy),
  1562  	}
  1563  }
  1564  
  1565  func storageSetKey() []string {
  1566  	return []string{
  1567  		string(storageKeyType),
  1568  		string(storageKeyName),
  1569  		string(storageKeyStorageClass),
  1570  		string(storageAccessMode),
  1571  		string(storageKeySize),
  1572  	}
  1573  }
  1574  
  1575  // validateDefaultSCInConfig verifies whether the KubeBlocks ConfigMap is configured with DEFAULT_STORAGE_CLASS.
  1576  // When KubeBlocks is installed, certain configurations are rendered into a ConfigMap named kubeblocks-manager-config.
  1577  // You can find the details in deploy/helm/template/configmap.yaml.
  1578  func validateDefaultSCInConfig(dynamic dynamic.Interface) (bool, error) {
  1579  	// todo: types.KubeBlocksManagerConfigMapName is essentially hard-coded, add a unique label for kubeblocks-manager-config
  1580  	namespace, err := util.GetKubeBlocksNamespaceByDynamic(dynamic)
  1581  	if err != nil {
  1582  		return false, err
  1583  	}
  1584  	cfg, err := dynamic.Resource(types.ConfigmapGVR()).Namespace(namespace).Get(context.Background(), types.KubeBlocksManagerConfigMapName, metav1.GetOptions{})
  1585  	if err != nil {
  1586  		return false, err
  1587  	}
  1588  	var config map[string]interface{}
  1589  	if cfg.Object["data"] == nil {
  1590  		return false, nil
  1591  	}
  1592  	data := cfg.Object["data"].(map[string]interface{})
  1593  	if data["config.yaml"] == nil {
  1594  		return false, nil
  1595  	}
  1596  	err = yaml.Unmarshal([]byte(data["config.yaml"].(string)), &config)
  1597  	if err != nil {
  1598  		return false, err
  1599  	}
  1600  	if config["DEFAULT_STORAGE_CLASS"] == nil {
  1601  		return false, nil
  1602  	}
  1603  	return len(config["DEFAULT_STORAGE_CLASS"].(string)) != 0, nil
  1604  }
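
// A minimal sketch of the lookup performed by validateDefaultSCInConfig above: the
// "config.yaml" entry of the kubeblocks-manager-config ConfigMap may contain a
// DEFAULT_STORAGE_CLASS key naming the default StorageClass. The sample content below
// is an assumption for illustration, not a verbatim copy of deploy/helm/template/configmap.yaml,
// and this helper is not called elsewhere in this file.
func exampleDefaultSCFromConfigYAML() (string, error) {
	configYAML := "DEFAULT_STORAGE_CLASS: standard\n"
	var config map[string]interface{}
	if err := yaml.Unmarshal([]byte(configYAML), &config); err != nil {
		return "", err
	}
	sc, _ := config["DEFAULT_STORAGE_CLASS"].(string)
	return sc, nil // "standard"
}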
  1605  
  1606  // buildCompStorages overrides the storage configuration with the values set via the --pvc flag, and handles the case where a component has multiple PVCs
  1607  func buildCompStorages(pvcs []string, cd *appsv1alpha1.ClusterDefinition) (map[string][]map[storageKey]string, error) {
  1608  	pvcSets := map[string][]map[storageKey]string{}
  1609  	parseKey := func(key string) storageKey {
  1610  		for _, k := range storageSetKey() {
  1611  			if strings.EqualFold(k, key) {
  1612  				return storageKey(k)
  1613  			}
  1614  		}
  1615  		return storageKeyUnknown
  1616  	}
  1617  
  1618  	buildPVCMap := func(sets []string) (map[storageKey]string, error) {
  1619  		res := map[storageKey]string{}
  1620  		for _, set := range sets {
  1621  			kv := strings.Split(set, "=")
  1622  			if len(kv) != 2 {
  1623  				return nil, fmt.Errorf("unknown --pvc format \"%s\", should be like key1=value1", set)
  1624  			}
  1625  
  1626  			// only record the supported key
  1627  			k := parseKey(kv[0])
  1628  			if k == storageKeyUnknown {
  1629  				return nil, fmt.Errorf("unknown --pvc key \"%s\", should be one of [%s]", kv[0], strings.Join(storageSetKey(), ","))
  1630  			}
  1631  			res[k] = kv[1]
  1632  		}
  1633  		return res, nil
  1634  	}
  1635  
  1636  	for _, pvc := range pvcs {
  1637  		pvcMap, err := buildPVCMap(strings.Split(pvc, ","))
  1638  		if err != nil {
  1639  			return nil, err
  1640  		}
  1641  		if len(pvcMap) == 0 {
  1642  			continue
  1643  		}
  1644  		compDefName := pvcMap[storageKeyType]
  1645  
  1646  		// the type is not specified by the user, so use the default component definition name; for now
  1647  		// only cluster definitions with one component are supported
  1648  		if len(compDefName) == 0 {
  1649  			name, err := cluster.GetDefaultCompName(cd)
  1650  			if err != nil {
  1651  				return nil, err
  1652  			}
  1653  
  1654  			// if there is more than one component definition, default to the first one and log a message
  1655  			if len(cd.Spec.ComponentDefs) > 1 {
  1656  				klog.V(1).Infof("the component is not specified, use the default component \"%s\" in cluster definition \"%s\"", name, cd.Name)
  1657  			}
  1658  			compDefName = name
  1659  		} else {
  1660  			// check the type is a valid component definition name
  1661  			valid := false
  1662  			for _, c := range cd.Spec.ComponentDefs {
  1663  				if c.Name == compDefName {
  1664  					valid = true
  1665  					break
  1666  				}
  1667  			}
  1668  			if !valid {
  1669  				return nil, fmt.Errorf("the type \"%s\" is not a valid component definition name", compDefName)
  1670  			}
  1671  		}
  1672  
  1673  		pvcSets[compDefName] = append(pvcSets[compDefName], pvcMap)
  1674  	}
  1675  	return pvcSets, nil
  1676  }
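
// A typical use of buildCompStorages, assuming a cluster definition cd that contains a
// component definition named "mysql". The accepted keys are exactly those returned by
// storageSetKey() and are matched case-insensitively; the component, PVC name and size
// below are illustrative values, and this helper is not called elsewhere in this file.
func exampleBuildCompStorages(cd *appsv1alpha1.ClusterDefinition) (map[string][]map[storageKey]string, error) {
	// one --pvc entry built from the storage keys; values are illustrative
	pvc := fmt.Sprintf("%s=mysql,%s=data,%s=20Gi", storageKeyType, storageKeyName, storageKeySize)
	return buildCompStorages([]string{pvc}, cd)
}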
  1677  
  1678  // rebuildCompStorage will rewrite the cluster component specs with the values in pvcMaps
  1679  func rebuildCompStorage(pvcMaps map[string][]map[storageKey]string, specs []*appsv1alpha1.ClusterComponentSpec) []*appsv1alpha1.ClusterComponentSpec {
  1680  	validateAccessMode := func(mode string) bool {
  1681  		return mode == string(corev1.ReadWriteOnce) || mode == string(corev1.ReadOnlyMany) || mode == string(corev1.ReadWriteMany) || mode == string(corev1.ReadWriteOncePod)
  1682  	}
  1683  
  1684  	// todo: currently each ClusterComponentVolumeClaimTemplate can only set one AccessMode
  1685  	buildClusterComponentVolumeClaimTemplate := func(storageSet map[storageKey]string) appsv1alpha1.ClusterComponentVolumeClaimTemplate {
  1686  		// set the default value
  1687  		res := appsv1alpha1.ClusterComponentVolumeClaimTemplate{
  1688  			Name: cluster.GenerateName(),
  1689  			Spec: appsv1alpha1.PersistentVolumeClaimSpec{
  1690  				AccessModes: []corev1.PersistentVolumeAccessMode{
  1691  					corev1.ReadWriteOnce,
  1692  				},
  1693  				Resources: corev1.ResourceRequirements{
  1694  					Requests: corev1.ResourceList{
  1695  						corev1.ResourceStorage: resource.MustParse(viper.GetString(types.CfgKeyClusterDefaultStorageSize)),
  1696  					},
  1697  				},
  1698  			},
  1699  		}
  1700  		if name, ok := storageSet[storageKeyName]; ok {
  1701  			res.Name = name
  1702  		}
  1703  		if accessMode, ok := storageSet[storageAccessMode]; ok {
  1704  			if validateAccessMode(accessMode) {
  1705  				res.Spec.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.PersistentVolumeAccessMode(accessMode)}
  1706  			} else {
  1707  				fmt.Printf("Warning: PV access mode %s is invalid, use `ReadWriteOnce` by default\n", accessMode)
  1708  			}
  1709  		}
  1710  		if storageClass, ok := storageSet[storageKeyStorageClass]; ok {
  1711  			res.Spec.StorageClassName = &storageClass
  1712  		}
  1713  		if storageSize, ok := storageSet[storageKeySize]; ok {
  1714  			res.Spec.Resources = corev1.ResourceRequirements{
  1715  				Requests: corev1.ResourceList{
  1716  					corev1.ResourceStorage: resource.MustParse(storageSize),
  1717  				},
  1718  			}
  1719  		}
  1720  		return res
  1721  	}
  1722  
  1723  	for componentNames, pvcs := range pvcMaps {
  1724  		var compPvcs []appsv1alpha1.ClusterComponentVolumeClaimTemplate
  1725  		for i := range pvcs {
  1726  			compPvcs = append(compPvcs, buildClusterComponentVolumeClaimTemplate(pvcs[i]))
  1727  		}
  1728  		for i := range specs {
  1729  			if specs[i].Name == componentNames {
  1730  				specs[i].VolumeClaimTemplates = compPvcs
  1731  			}
  1732  		}
  1733  	}
  1734  	return specs
  1735  }
  1736  
  1737  // serviceRefKey declares --service-reference validate keyword
  1738  type serviceRefKey string
  1739  
  1740  const (
  1741  	serviceRefKeyName      serviceRefKey = "name"
  1742  	serviceRefKeyCluster   serviceRefKey = "cluster"
  1743  	serviceRefKeyNamespace serviceRefKey = "namespace"
  1744  	serviceRefKeyUnknown   serviceRefKey = "unknown"
  1745  )
  1746  
  1747  func serviceRefSetKey() []string {
  1748  	return []string{
  1749  		string(serviceRefKeyName),
  1750  		string(serviceRefKeyCluster),
  1751  		string(serviceRefKeyNamespace),
  1752  	}
  1753  }
  1754  
  1755  // getServiceRefs parses the serviceRefs from the --service-reference flag and performs basic validation, then returns all valid serviceRefs
  1756  func getServiceRefs(serviceRef []string, cd *appsv1alpha1.ClusterDefinition) ([]map[serviceRefKey]string, error) {
  1757  	var serviceRefSets []map[serviceRefKey]string
  1758  
  1759  	parseKey := func(key string) serviceRefKey {
  1760  		for _, k := range serviceRefSetKey() {
  1761  			if strings.EqualFold(k, key) {
  1762  				return serviceRefKey(k)
  1763  			}
  1764  		}
  1765  		return serviceRefKeyUnknown
  1766  	}
  1767  
  1768  	buildServiceRefMap := func(sets []string) (map[serviceRefKey]string, error) {
  1769  		res := map[serviceRefKey]string{}
  1770  		for _, set := range sets {
  1771  			kv := strings.Split(set, "=")
  1772  			if len(kv) != 2 {
  1773  				return nil, fmt.Errorf("unknown --service-reference format \"%s\", should be like key1=value1", set)
  1774  			}
  1775  
  1776  			// only record the supported key
  1777  			k := parseKey(kv[0])
  1778  			if k == serviceRefKeyUnknown {
  1779  				return nil, fmt.Errorf("unknown --service-reference key \"%s\", should be one of [%s]", kv[0], strings.Join(serviceRefSetKey(), ","))
  1780  			}
  1781  			res[k] = kv[1]
  1782  		}
  1783  		return res, nil
  1784  	}
  1785  
  1786  	for _, ref := range serviceRef {
  1787  		refMap, err := buildServiceRefMap(strings.Split(ref, ","))
  1788  		if err != nil {
  1789  			return nil, err
  1790  		}
  1791  		if len(refMap) == 0 {
  1792  			continue
  1793  		}
  1794  		serviceRefName := refMap[serviceRefKeyName]
  1795  
  1796  		if len(serviceRefName) == 0 {
  1797  			name, err := cluster.GetDefaultServiceRef(cd)
  1798  			if err != nil {
  1799  				return nil, err
  1800  			}
  1801  			refMap[serviceRefKeyName] = name
  1802  		} else {
  1803  			// check if the serviceRefName is defined in the cluster-definition
  1804  			valid := false
  1805  			for _, c := range cluster.GetServiceRefs(cd) {
  1806  				if c == serviceRefName {
  1807  					valid = true
  1808  					break
  1809  				}
  1810  			}
  1811  			if !valid {
  1812  				// todo:  kbcli cluster list-serviceRef
  1813  				return nil, fmt.Errorf("the service reference name \"%s\" is not declared in the cluster components, use `kbcli cluster list-serviceRef %s` to show all available service reference names", serviceRefName, cd.Name)
  1814  			}
  1815  		}
  1816  		serviceRefSets = append(serviceRefSets, refMap)
  1817  	}
  1818  	return serviceRefSets, nil
  1819  }
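
// A minimal sketch of the --service-reference format consumed by getServiceRefs above:
// each flag value is a comma-separated list of key=value pairs whose keys are limited to
// serviceRefSetKey(). The referenced cluster and namespace below are illustrative values,
// and this helper is not called elsewhere in this file.
func exampleGetServiceRefs(cd *appsv1alpha1.ClusterDefinition) ([]map[serviceRefKey]string, error) {
	ref := fmt.Sprintf("%s=pgService,%s=mypg,%s=default",
		serviceRefKeyName, serviceRefKeyCluster, serviceRefKeyNamespace)
	return getServiceRefs([]string{ref}, cd)
}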
  1820  
  1821  // buildServiceRefs supplements the serviceRef content for compSpecs based on the input flags --service-reference
  1822  func buildServiceRefs(serviceRef []string, cd *appsv1alpha1.ClusterDefinition, compSpecs []*appsv1alpha1.ClusterComponentSpec) ([]*appsv1alpha1.ClusterComponentSpec, error) {
  1823  	refSet, err := getServiceRefs(serviceRef, cd)
  1824  	if err != nil {
  1825  		return nil, err
  1826  	}
  1827  	// check whether a ServiceRefDeclaration with the given refName is declared in the cluster-definition component corresponding to comp
  1828  	check := func(comp *appsv1alpha1.ClusterComponentSpec, refName string) bool {
  1829  		valid := false
  1830  		for _, cdSpec := range cd.Spec.ComponentDefs {
  1831  			if cdSpec.Name != comp.Name {
  1832  				continue
  1833  			}
  1834  			for i := range cdSpec.ServiceRefDeclarations {
  1835  				if cdSpec.ServiceRefDeclarations[i].Name == refName {
  1836  					// serviceRefName has been declared in cluster-definition
  1837  					valid = true
  1838  					break
  1839  				}
  1840  			}
  1841  		}
  1842  		return valid
  1843  	}
  1844  
  1845  	for _, ref := range refSet {
  1846  		name := ref[serviceRefKeyName]
  1847  		for i, comp := range compSpecs {
  1848  			if !check(comp, name) {
  1849  				continue
  1850  			}
  1851  			// if the cluster-definition ComponentDef has the matching ServiceRefDeclaration, add the ServiceRef to the cluster compSpecs
  1852  			if compSpecs[i].ServiceRefs == nil {
  1853  				compSpecs[i].ServiceRefs = []appsv1alpha1.ServiceRef{
  1854  					{
  1855  						Name:      ref[serviceRefKeyName],
  1856  						Namespace: ref[serviceRefKeyNamespace],
  1857  						Cluster:   ref[serviceRefKeyCluster],
  1858  					},
  1859  				}
  1860  			} else {
  1861  				compSpecs[i].ServiceRefs = append(compSpecs[i].ServiceRefs,
  1862  					appsv1alpha1.ServiceRef{
  1863  						Name:      ref[serviceRefKeyName],
  1864  						Namespace: ref[serviceRefKeyNamespace],
  1865  						Cluster:   ref[serviceRefKeyCluster],
  1866  					})
  1867  			}
  1868  		}
  1869  	}
  1870  	return compSpecs, nil
  1871  }