github.com/opentelekomcloud/gophertelekomcloud@v0.9.3/openstack/mrs/v1/cluster/Create.go

package cluster

import (
	golangsdk "github.com/opentelekomcloud/gophertelekomcloud"
	"github.com/opentelekomcloud/gophertelekomcloud/internal/build"
	"github.com/opentelekomcloud/gophertelekomcloud/internal/extract"
	"github.com/opentelekomcloud/gophertelekomcloud/openstack/common/tags"
)

type CreateOpts struct {
	// Cluster billing mode.
	// Set this parameter to 12.
	BillingType int `json:"billing_type"`
	// Region of the cluster.
	DataCenter string `json:"data_center"`
	// AZ ID.
	// - AZ1(eude-01):bf84aba586ce4e948da0b97d9a7d62fb
	// - AZ2(eude-02):bf84aba586ce4e948da0b97d9a7d62fc
	AvailableZoneId string `json:"available_zone_id"`
	// Cluster name.
	// It must be unique.
	// It contains only 1 to 64 characters.
	// Only letters, digits, hyphens (-), and underscores (_) are allowed.
	ClusterName string `json:"cluster_name"`
	// Name of the VPC where the subnet is located.
	// Perform the following operations to obtain the VPC name from the VPC management console:
	// 1. Log in to the management console.
	// 2. Click Virtual Private Cloud and select Virtual Private Cloud from the left list.
	// On the Virtual Private Cloud page, obtain the VPC name from the list.
	Vpc string `json:"vpc"`
	// ID of the VPC where the subnet is located.
	// Perform the following operations to obtain the VPC ID from the VPC management console:
	// 1. Log in to the management console.
	// 2. Click Virtual Private Cloud and select Virtual Private Cloud from the left list.
	// On the Virtual Private Cloud page, obtain the VPC ID from the list.
	VpcId string `json:"vpc_id"`
	// Network ID.
	// Perform the following operations to obtain the network ID of the VPC from the VPC management console:
	// 1. Log in to the management console.
	// 2. Click Virtual Private Cloud and select Virtual Private Cloud from the left list.
	// On the Virtual Private Cloud page, obtain the network ID of the VPC from the list.
	SubnetId string `json:"subnet_id"`
	// Subnet name.
	// Perform the following operations to obtain the subnet name from the VPC management console:
	// 1. Log in to the management console.
	// 2. Click Virtual Private Cloud and select Virtual Private Cloud from the left list.
	// On the Virtual Private Cloud page, obtain the subnet name of the VPC from the list.
	SubnetName string `json:"subnet_name"`
	// Security group ID of the cluster.
	// - If this parameter is left blank,
	// MRS automatically creates a security group, whose name starts with mrs_{cluster_name}.
	// - If this parameter is not left blank, a fixed security group is used to create a cluster.
	// The transferred ID must be the security group ID owned by the current tenant.
	// The security group must include an inbound rule in
	// which all protocols and all ports are allowed and
	// the source is the IP address of the specified node on the management plane.
	SecurityGroupsId string `json:"security_groups_id,omitempty"`
	// Cluster tags.
	// - A cluster allows a maximum of 10 tags. A tag name (key) must be unique in a cluster.
	// - A tag key or value cannot contain the following special characters: =*<>\,|/
	Tags []tags.ResourceTag `json:"tags,omitempty"`
	// Cluster version.
	// Possible values are as follows:
	// - MRS 1.6.3
	// - MRS 1.7.2
	// - MRS 1.9.2
	// - MRS 2.1.0
	// - MRS 3.1.0-LTS.1
	// - MRS 3.1.2-LTS.3
	ClusterVersion string `json:"cluster_version"`
	// Cluster type.
	// - 0: analysis cluster
	// - 1: streaming cluster
	// The default value is 0.
	// Note: Currently, hybrid clusters cannot be created using APIs.
	ClusterType *int `json:"cluster_type,omitempty"`
	// Running mode of an MRS cluster.
	// - 0: normal cluster.
	// In a normal cluster, Kerberos authentication is disabled,
	// and users can use all functions provided by the cluster.
	// - 1: security cluster.
	// In a security cluster, Kerberos authentication is enabled,
	// and common users cannot use the file management
	// and job management functions of an MRS cluster or view cluster resource usage
	// and the job records of Hadoop and Spark.
	// To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator.
	// NOTE
	// For MRS 1.7.2 or earlier,
	// the request body contains the cluster_admin_secret field only when safe_mode is set to 1.
	SafeMode int `json:"safe_mode"`
	// Password of the MRS Manager administrator.
	// - Must contain 8 to 32 characters.
	// - Must contain at least three of the following:
	// 	– Lowercase letters
	// 	– Uppercase letters
	// 	– Digits
	// 	– Special characters: `~!@#$%^&*()-_=+\|[{}];:'",<.>/? and space
	// - Cannot be the username or the username spelled backwards.
	// NOTE
	// For MRS 1.7.2 or earlier, this parameter is mandatory only when safe_mode is set to 1.
	ClusterAdminSecret string `json:"cluster_admin_secret,omitempty"`
	// Cluster login mode.
	// - 0: password
	// - 1: key pair
	// The default value is 1.
	// - If login_mode is set to 0, the request body contains the cluster_master_secret field.
	// - If login_mode is set to 1, the request body contains the node_public_cert_name field.
	// NOTE
	// This parameter is valid only for clusters of MRS 1.6.3 or later;
	// versions earlier than MRS 1.6.3 do not support it.
	LoginMode *int `json:"login_mode,omitempty"`
	// Password of user root for logging in to a cluster node.
	// If login_mode is set to 0, the request body contains the cluster_master_secret field.
	// A password must meet the following requirements:
	// - Must be 8 to 26 characters long.
	// - Must contain at least three of the following:
	// uppercase letters, lowercase letters, digits,
	// and special characters (!@$%^-_=+[{}]:,./?), but must not contain spaces.
	// - Cannot be the username or the username spelled backwards.
	ClusterMasterSecret string `json:"cluster_master_secret"`
	// Name of a key pair. You can use a key pair to log in to the Master node in the cluster.
	// If login_mode is set to 1, the request body contains the node_public_cert_name field.
	NodePublicCertName string `json:"node_public_cert_name,omitempty"`
	// Whether to collect logs when cluster creation fails.
	// - 0: Do not collect.
	// - 1: Collect.
	// The default value is 1, indicating that OBS buckets will be created
	// and only used to collect logs that record MRS cluster creation failures.
	LogCollection *int `json:"log_collection,omitempty"`
	// List of nodes.
	NodeGroups []NodeGroup `json:"node_groups,omitempty"`
	// List of service components to be installed.
	ComponentList []ComponentList `json:"component_list"`
	// Jobs can be submitted when a cluster is created. Currently, only one job can be created.
	AddJobs []AddJobs `json:"add_jobs,omitempty"`
	// Bootstrap action script information.
	BootstrapScripts []BootstrapScript `json:"bootstrap_scripts,omitempty"`
	// Number of Master nodes.
	// If cluster HA is enabled, set this parameter to 2. If cluster HA is disabled, set this parameter to 1.
	MasterNodeNum int `json:"master_node_num" required:"true"`
	// Instance specifications of the Master node, for example, c6.4xlarge.4linux.mrs.
	// MRS supports host specifications determined by CPU, memory, and disk space.
	MasterNodeSize string `json:"master_node_size" required:"true"`
	// Number of Core nodes.
	// Value range: 1 to 500
	// A maximum of 500 Core nodes are supported by default.
	// If more than 500 Core nodes are required, contact technical support.
	CoreNodeNum int `json:"core_node_num" required:"true"`
	// Instance specifications of the Core node, for example, c6.4xlarge.4linux.mrs.
	CoreNodeSize string `json:"core_node_size" required:"true"`
	// This parameter is a multi-disk parameter, indicating the data disk storage type of the Master node.
	// Currently, SATA, SAS and SSD are supported.
	MasterDataVolumeType string `json:"master_data_volume_type,omitempty"`
	// This parameter is a multi-disk parameter, indicating the data disk storage space of the Master node.
	// To increase data storage capacity, you can add disks at the same time when creating a cluster.
	// Value range: 100 GB to 32,000 GB
	MasterDataVolumeSize int `json:"master_data_volume_size,omitempty"`
	// This parameter is a multi-disk parameter, indicating the number of data disks of the Master node.
	// The value can be set to 1 only.
	MasterDataVolumeCount int `json:"master_data_volume_count,omitempty"`
	// This parameter is a multi-disk parameter, indicating the data disk storage type of the Core node.
	// Currently, SATA, SAS and SSD are supported.
	CoreDataVolumeType string `json:"core_data_volume_type,omitempty"`
	// This parameter is a multi-disk parameter, indicating the data disk storage space of the Core node.
	// To increase data storage capacity, you can add disks at the same time when creating a cluster.
	// Value range: 100 GB to 32,000 GB
	CoreDataVolumeSize int `json:"core_data_volume_size,omitempty"`
	// This parameter is a multi-disk parameter, indicating the number of data disks of the Core node.
	// Value range: 1 to 10
	CoreDataVolumeCount int `json:"core_data_volume_count,omitempty"`
	// Data disk storage type of the Master and Core nodes. Currently, SATA, SAS and SSD are supported.
	// Disk parameters can be represented by volume_type and volume_size, or multi-disk parameters.
	// If the volume_type and volume_size parameters coexist with the multi-disk parameters,
	// the system reads the volume_type and volume_size parameters first.
	// You are advised to use the multi-disk parameters.
	// - SATA: Common I/O
	// - SAS: High I/O
	// - SSD: Ultra-high I/O
	VolumeType string `json:"volume_type,omitempty"`
	// Data disk storage space of the Master and Core nodes. To increase data storage capacity,
	// you can add disks at the same time when creating a cluster.
	// Select a proper disk storage space based on the following application scenarios:
	// - Separation of data storage and computing: Data is stored in the OBS system.
	//   Costs of clusters are relatively low but computing performance is poor. The clusters can be deleted at any time.
	//   It is recommended when data computing is infrequently performed.
	// - Integration of data storage and computing: Data is stored in the HDFS system.
	//   Costs of clusters are relatively high but computing performance is good. The clusters cannot be deleted in a short term.
	//   It is recommended when data computing is frequently performed.
	// Value range: 100 GB to 32,000 GB
	// This parameter is not recommended. For details, see the description of the volume_type parameter.
	VolumeSize int `json:"volume_size,omitempty"`
}

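// Create submits a cluster creation request via POST /v1.1/{project_id}/run-job-flow.
//
// A minimal usage sketch, assuming an already authenticated MRS v1 service
// client; every concrete value below (AZ ID, network IDs, flavor name,
// password) is a placeholder, not a value prescribed by this package:
//
//	opts := cluster.CreateOpts{
//		BillingType:         12,
//		DataCenter:          "eu-de",
//		AvailableZoneId:     "bf84aba586ce4e948da0b97d9a7d62fb",
//		ClusterName:         "mrs-demo",
//		Vpc:                 "vpc-demo",
//		VpcId:               "<vpc-id>",
//		SubnetId:            "<network-id>",
//		SubnetName:          "subnet-demo",
//		ClusterVersion:      "MRS 3.1.2-LTS.3",
//		SafeMode:            0,
//		ClusterMasterSecret: "<root-password>",
//		MasterNodeNum:       2,
//		MasterNodeSize:      "c6.4xlarge.4linux.mrs",
//		CoreNodeNum:         3,
//		CoreNodeSize:        "c6.4xlarge.4linux.mrs",
//		ComponentList: []cluster.ComponentList{
//			{ComponentName: "Hadoop"},
//			{ComponentName: "Spark"},
//		},
//	}
//	created, err := cluster.Create(client, opts)
//	if err != nil {
//		// handle the error
//	}
//	fmt.Println(created.ClusterId)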
func Create(client *golangsdk.ServiceClient, opts CreateOpts) (*CreateResponse, error) {
	b, err := build.RequestBody(opts, "")
	if err != nil {
		return nil, err
	}

	// POST /v1.1/{project_id}/run-job-flow
	raw, err := client.Post(client.ServiceURL("run-job-flow"), b, nil, &golangsdk.RequestOpts{
		OkCodes: []int{200},
	})
	if err != nil {
		return nil, err
	}

	var res CreateResponse
	err = extract.Into(raw.Body, &res)
	return &res, err
}

type NodeGroup struct {
	// Node group name.
	// - master_node_default_group
	// - core_node_analysis_group
	// - core_node_streaming_group
	// - task_node_analysis_group
	// - task_node_streaming_group
	GroupName string `json:"group_name"`
	// Number of nodes.
	// The value ranges from 0 to 500 and the default value is 0.
	// The total number of Core and Task nodes cannot exceed 500.
	NodeNum int `json:"node_num"`
	// Instance specifications of a node.
	// For details about the configuration method, see the remarks of master_node_size.
	NodeSize string `json:"node_size"`
	// System disk storage space of a node.
	RootVolumeSize string `json:"root_volume_size,omitempty"`
	// System disk storage type of a node.
	// Currently, SATA, SAS and SSD are supported.
	// - SATA: Common I/O
	// - SAS: High I/O
	// - SSD: Ultra-high I/O
	RootVolumeType string `json:"root_volume_type,omitempty"`
	// Data disk storage type of a node.
	// Currently, SATA, SAS and SSD are supported.
	// - SATA: Common I/O
	// - SAS: High I/O
	// - SSD: Ultra-high I/O
	DataVolumeType string `json:"data_volume_type,omitempty"`
	// Number of data disks of a node.
	// Value range: 0 to 10
	DataVolumeCount *int `json:"data_volume_count,omitempty"`
	// Data disk storage space of a node.
	// Value range: 100 GB to 32,000 GB
	DataVolumeSize *int `json:"data_volume_size,omitempty"`
	// Auto scaling rule information.
	// This parameter is valid only when group_name is set to task_node_analysis_group or task_node_streaming_group.
	AutoScalingPolicy *AutoScalingPolicy `json:"auto_scaling_policy,omitempty"`
}
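
// A node group sketch for a Core analysis group (illustrative; the counts,
// flavor and disk sizes below are placeholder assumptions):
//
//	volumeCount, volumeSize := 1, 600
//	group := cluster.NodeGroup{
//		GroupName:       "core_node_analysis_group",
//		NodeNum:         3,
//		NodeSize:        "c6.4xlarge.4linux.mrs",
//		RootVolumeSize:  "480",
//		RootVolumeType:  "SAS",
//		DataVolumeType:  "SAS",
//		DataVolumeCount: &volumeCount,
//		DataVolumeSize:  &volumeSize,
//	}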

type AutoScalingPolicy struct {
	// Whether to enable the auto scaling rule.
	AutoScalingEnable bool `json:"auto_scaling_enable"`
	// Minimum number of nodes left in the node group.
	// Value range: 0 to 500
	MinCapacity int `json:"min_capacity"`
	// Maximum number of nodes in the node group.
	// Value range: 0 to 500
	MaxCapacity int `json:"max_capacity"`
	// Resource plan list.
	// If this parameter is left blank, the resource plan is disabled.
	// When auto scaling is enabled, either a resource plan or an auto scaling rule must be configured.
	// MRS 1.6.3 or later supports this parameter.
	ResourcesPlans []ResourcesPlan `json:"resources_plans,omitempty"`
	// List of custom scaling automation scripts.
	// If this parameter is left blank, a hook script is disabled.
	// MRS 1.7.2 or later supports this parameter.
	ExecScripts []ExecScript `json:"exec_scripts,omitempty"`
	// List of auto scaling rules.
	// When auto scaling is enabled, either a resource plan or an auto scaling rule must be configured.
	Rules []Rules `json:"rules,omitempty"`
}

type ResourcesPlan struct {
	// Cycle type of a resource plan.
	// Currently, only the following cycle type is supported:
	// - daily
	PeriodType string `json:"period_type"`
	// Start time of a resource plan.
	// The value is in the format of hour:minute, indicating that the time ranges from 0:00 to 23:59.
	StartTime string `json:"start_time"`
	// End time of a resource plan.
	// The value is in the same format as that of start_time.
	// The interval between end_time and start_time must be greater than or equal to 30 minutes.
	EndTime string `json:"end_time"`
	// Minimum number of the preserved nodes in a node group in a resource plan.
	// Value range: 0 to 500
	MinCapacity int `json:"min_capacity"`
	// Maximum number of the preserved nodes in a node group in a resource plan.
	// Value range: 0 to 500
	MaxCapacity int `json:"max_capacity"`
}

type Rules struct {
	// Name of an auto scaling rule.
	// It contains only 1 to 64 characters.
	// Only letters, digits, hyphens (-), and underscores (_) are allowed.
	// Rule names must be unique in a node group.
	Name string `json:"name"`
	// Description about an auto scaling rule.
	// It contains a maximum of 1,024 characters.
	Description string `json:"description,omitempty"`
	// Auto scaling rule adjustment type.
	// The options are as follows:
	// - scale_out: cluster scale-out
	// - scale_in: cluster scale-in
	AdjustmentType string `json:"adjustment_type"`
	// Cluster cooldown time after an auto scaling rule is triggered, during which no auto scaling operation is performed.
	// The unit is minute.
	// Value range: 0 to 10,080.
	// One week is equal to 10,080 minutes.
	CoolDownMinutes int `json:"cool_down_minutes"`
	// Number of nodes that can be adjusted once.
	// Value range: 1 to 100
	ScalingAdjustment int `json:"scaling_adjustment"`
	// Condition for triggering a rule.
	Trigger *Trigger `json:"trigger"`
}

type Trigger struct {
	// Metric name.
	// This triggering condition makes a judgment according to the value of the metric.
	// A metric name contains a maximum of 64 characters.
	MetricName string `json:"metric_name"`
	// Metric threshold to trigger a rule.
	// The parameter value must be an integer or number with two decimal places only.
	MetricValue string `json:"metric_value"`
	// Metric judgment logic operator.
	// The options are as follows:
	// - LT: less than
	// - GT: greater than
	// - LTOE: less than or equal to
	// - GTOE: greater than or equal to
	ComparisonOperator string `json:"comparison_operator,omitempty"`
	// Number of consecutive five-minute periods during which a metric threshold is reached.
	// Value range: 1 to 288
	EvaluationPeriods int `json:"evaluation_periods"`
}
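
// Combining the types above, a scaling policy sketch for a Task node group
// (illustrative; the metric name and thresholds are placeholder assumptions,
// not values validated by this package):
//
//	policy := cluster.AutoScalingPolicy{
//		AutoScalingEnable: true,
//		MinCapacity:       1,
//		MaxCapacity:       10,
//		Rules: []cluster.Rules{{
//			Name:              "default-expand-1",
//			AdjustmentType:    "scale_out",
//			CoolDownMinutes:   5,
//			ScalingAdjustment: 1,
//			Trigger: &cluster.Trigger{
//				MetricName:         "YARNMemoryAvailablePercentage",
//				MetricValue:        "25",
//				ComparisonOperator: "LT",
//				EvaluationPeriods:  10,
//			},
//		}},
//	}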

type ExecScript struct {
	// Name of a custom automation script.
	// It must be unique within a cluster.
	// The value can contain only digits, letters, spaces, hyphens (-),
	// and underscores (_) and cannot start with a space.
	// The value can contain 1 to 64 characters.
	Name string `json:"name"`
	// Path of a custom automation script.
	// Set this parameter to an OBS bucket path or a local VM path.
	// - OBS bucket path: Enter a script path manually, for example, s3a://XXX/scale.sh.
	// - Local VM path: Enter a script path.
	//   The script path must start with a slash (/) and end with .sh.
	Uri string `json:"uri"`
	// Parameters of a custom automation script.
	// - Multiple parameters are separated by space.
	// - The following predefined system parameters can be transferred:
	//	 – ${mrs_scale_node_num}: Number of the nodes to be added or removed
	//	 – ${mrs_scale_type}: Scaling type. The value can be scale_out or scale_in.
	//	 – ${mrs_scale_node_hostnames}: Host names of the nodes to be added or removed
	//	 – ${mrs_scale_node_ips}: IP addresses of the nodes to be added or removed
	//	 – ${mrs_scale_rule_name}: Name of the rule that triggers auto scaling
	// - Other user-defined parameters are used in the same way as those of common shell scripts.
	//   Parameters are separated by space.
	Parameters string `json:"parameters,omitempty"`
	// Type of a node where the custom automation script is executed.
	// The node type can be Master, Core, or Task.
	Nodes []string `json:"nodes"`
	// Whether the custom automation script runs only on the active Master node.
	// The default value is false, indicating that the custom automation script can run on all Master nodes.
	ActiveMaster *bool `json:"active_master,omitempty"`
	// Time when a script is executed.
	// The following four options are supported:
	// - before_scale_out: before scale-out
	// - before_scale_in: before scale-in
	// - after_scale_out: after scale-out
	// - after_scale_in: after scale-in
	ActionStage string `json:"action_stage"`
	// Whether to continue to execute subsequent scripts
	// and create a cluster after the custom automation script fails to be executed.
	// - continue: Continue to execute subsequent scripts.
	// - errorout: Stop the action.
	// NOTE
	// - You are advised to set this parameter to continue in the commissioning phase
	//   so that the cluster can continue to be installed
	//   and started no matter whether the custom automation script is executed successfully.
	// - The scale-in operation cannot be undone.
	//   Therefore, fail_action must be set to continue for the scripts that are executed after scale-in.
	FailAction string `json:"fail_action"`
}
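
// A hook script sketch that runs after each scale-out (illustrative; the OBS
// bucket path, script name and node type value are placeholder assumptions):
//
//	script := cluster.ExecScript{
//		Name:        "warm-up-cache",
//		Uri:         "s3a://my-bucket/scripts/warm-up-cache.sh",
//		Parameters:  "${mrs_scale_node_hostnames}",
//		Nodes:       []string{"task"},
//		ActionStage: "after_scale_out",
//		FailAction:  "continue",
//	}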

type ComponentList struct {
	// Component name
	ComponentName string `json:"component_name"`
}

type AddJobs struct {
	// Job type code.
	// - 1: MapReduce
	// - 2: Spark
	// - 3: Hive Script
	// - 4: HiveQL (not supported currently)
	// - 5: DistCp, importing and exporting data (not supported currently)
	// - 6: Spark Script
	// - 7: Spark SQL, submitting Spark SQL statements (not supported currently).
	// NOTE
	// Spark and Hive jobs can be added only to clusters that include the Spark and Hive components.
	JobType int `json:"job_type" required:"true"`
	// Job name.
	// It contains 1 to 64 characters.
	// Only letters, digits, hyphens (-), and underscores (_) are allowed.
	// NOTE
	// Identical job names are allowed but not recommended.
	JobName string `json:"job_name" required:"true"`
	// Path of the JAR or SQL file for program execution.
	// The parameter must meet the following requirements:
	// - Contains a maximum of 1,023 characters,
	//   excluding special characters such as ;|&><'$. The parameter value cannot be empty or full of spaces.
	// - Files can be stored in HDFS or OBS.
	//   The path varies depending on the file system.
	// 	– OBS: The path must start with s3a://. Files or programs encrypted by KMS are not supported.
	// 	– HDFS: The path starts with a slash (/).
	// - Spark Script must end with .sql, while MapReduce and Spark Jar must end with .jar; sql and jar are case-insensitive.
	JarPath string `json:"jar_path,omitempty"`
	// Key parameter for program execution.
	// The parameter is specified by the function of the user's program.
	// MRS is only responsible for loading the parameter.
	// The parameter contains a maximum of 2,047 characters,
	// excluding special characters such as ;|&>'<$, and can be left blank.
	Arguments string `json:"arguments,omitempty"`
	// Address for inputting data.
	// Files can be stored in HDFS or OBS.
	// The path varies depending on the file system.
	// - OBS: The path must start with s3a://.
	//   Files or programs encrypted by KMS are not supported.
	// - HDFS: The path starts with a slash (/).
	// The parameter contains a maximum of 1,023 characters,
	// excluding special characters such as ;|&>'<$, and can be left blank.
	Input string `json:"input,omitempty"`
	// Address for outputting data.
	// Files can be stored in HDFS or OBS.
	// The path varies depending on the file system.
	// - OBS: The path must start with s3a://.
	// - HDFS: The path starts with a slash (/).
	// If the specified path does not exist,
	// the system will automatically create it.
	// The parameter contains a maximum of 1,023 characters,
	// excluding special characters such as ;|&>'<$, and can be left blank.
	Output string `json:"output,omitempty"`
	// Path for storing job logs that record job running status.
	// Files can be stored in HDFS or OBS.
	// The path varies depending on the file system.
	// - OBS: The path must start with s3a://.
	// - HDFS: The path starts with a slash (/).
	// The parameter contains a maximum of 1,023 characters,
	// excluding special characters such as ;|&>'<$, and can be left blank.
	JobLog string `json:"job_log,omitempty"`
	// Whether to delete the cluster after the job execution is complete.
	// - true: Yes
	// - false: No
	ShutdownCluster *bool `json:"shutdown_cluster,omitempty"`
	// Data import and export.
	// - import
	// - export
	FileAction string `json:"file_action,omitempty"`
	// Whether to submit the job during cluster creation.
	// - true: Submit a job during cluster creation.
	// - false: Submit a job after the cluster is created.
	SubmitJobOnceClusterRun *bool `json:"submit_job_once_cluster_run" required:"true"`
	// HiveQL statement
	Hql string `json:"hql,omitempty"`
	// SQL program path.
	// This parameter is needed by Spark Script and Hive Script jobs only, and must meet the following requirements:
	// - Contains a maximum of 1,023 characters, excluding special characters such as ;|&><'$.
	//   The parameter value cannot be empty or full of spaces.
	// - Files can be stored in HDFS or OBS. The path varies depending on the file system.
	// 	– OBS: The path must start with s3a://.
	// 	  Files or programs encrypted by KMS are not supported.
	// 	– HDFS: The path starts with a slash (/).
	// - Ends with .sql. sql is case-insensitive.
	HiveScriptPath string `json:"hive_script_path" required:"true"`
}
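
// A job entry sketch for a Hive Script job submitted while the cluster is
// being created (illustrative; the OBS paths are placeholder assumptions):
//
//	submitOnRun := true
//	job := cluster.AddJobs{
//		JobType:                 3, // Hive Script
//		JobName:                 "hive-demo",
//		JarPath:                 "s3a://my-bucket/program/demo.sql",
//		JobLog:                  "s3a://my-bucket/log/",
//		SubmitJobOnceClusterRun: &submitOnRun,
//		HiveScriptPath:          "s3a://my-bucket/program/demo.sql",
//	}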

type BootstrapScript struct {
	// Name of a bootstrap action script.
	// It must be unique in a cluster.
	// The value can contain only digits, letters, spaces, hyphens (-),
	// and underscores (_) and cannot start with a space.
	// The value can contain 1 to 64 characters.
	Name string `json:"name" required:"true"`
	// Path of a bootstrap action script.
	// Set this parameter to an OBS bucket path or a local VM path.
	// - OBS bucket path: Enter a script path manually.
	//   For example, enter the path of the public sample script provided by MRS.
	//   Example: s3a://bootstrap/presto/presto-install.sh.
	//   If dualroles is installed, the parameter of the presto-install.sh script is dualroles.
	//   If worker is installed, the parameter of the presto-install.sh script is worker.
	//   Based on the Presto usage habit,
	//   you are advised to install dualroles on the active Master nodes and worker on the Core nodes.
	// - Local VM path: Enter a script path.
	//   The script path must start with a slash (/) and end with .sh.
	Uri string `json:"uri" required:"true"`
	// Bootstrap action script parameters.
	Parameters string `json:"parameters,omitempty"`
	// Type of a node where the bootstrap action script is executed.
	// The value can be Master, Core, or Task.
	Nodes []string `json:"nodes" required:"true"`
	// Whether the bootstrap action script runs only on active Master nodes.
	// The default value is false, indicating that the bootstrap action script can run on all Master nodes.
	ActiveMaster *bool `json:"active_master,omitempty"`
	// Time when the bootstrap action script is executed.
	// Currently, the following two options are available:
	// Before component start and After component start.
	// The default value is false,
	// indicating that the bootstrap action script is executed after the component is started.
	BeforeComponentStart *bool `json:"before_component_start,omitempty"`
	// Whether to continue executing subsequent scripts
	// and creating a cluster after the bootstrap action script fails to be executed.
	// - continue: Continue to execute subsequent scripts.
	// - errorout: Stop the action.
	//   The default value is errorout, indicating that the action is stopped.
	// NOTE
	// You are advised to set this parameter to continue in the commissioning phase
	// so that the cluster can continue to be installed
	// and started no matter whether the bootstrap action is successful.
	FailAction string `json:"fail_action" required:"true"`
}
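
// A bootstrap action sketch based on the public Presto sample described in
// the Uri field above (illustrative; availability of the sample bucket in a
// given region is an assumption):
//
//	script := cluster.BootstrapScript{
//		Name:       "presto-install",
//		Uri:        "s3a://bootstrap/presto/presto-install.sh",
//		Parameters: "dualroles",
//		Nodes:      []string{"master"},
//		FailAction: "errorout",
//	}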

type CreateResponse struct {
	// Cluster ID, which is returned by the system after the cluster is created.
	ClusterId string `json:"cluster_id"`
	// Operation result.
	// - true: The operation is successful.
	// - false: The operation failed.
	Result bool `json:"result"`
	// System message, which can be empty.
	Msg string `json:"msg"`
}