github.com/chnsz/golangsdk@v0.0.0-20240506093406-85a3fbfa605b/openstack/mrs/v2/clusters/requests.go

     1  package clusters
     2  
     3  import (
     4  	"github.com/chnsz/golangsdk"
     5  	"github.com/chnsz/golangsdk/openstack/common/tags"
     6  )
     7  
     8  // CreateOpts is a structure representing the information required to create a cluster.
     9  type CreateOpts struct {
    10  	// Region of the cluster.
    11  	Region string `json:"region" required:"true"`
    12  	// Availability zone name.
    13  	AvailabilityZone string `json:"availability_zone" required:"true"`
    14  	// Cluster name, which can contain 2 to 64 characters.
    15  	// Only letters, digits, hyphens (-), and underscores (_) are allowed.
    16  	ClusterName string `json:"cluster_name" required:"true"`
    17  	// Cluster type. The options are as follows:
    18  	//   ANALYSIS: analysis cluster
    19  	//   STREAMING: streaming cluster
    20  	//   MIXED: hybrid cluster
    21  	//   CUSTOM: customized cluster, which is supported only by MRS 3.x.
    22  	ClusterType string `json:"cluster_type" required:"true"`
    23  	// Cluster version.
    24  	// Possible values are as follows:
    25  	//   MRS 1.8.10
    26  	//   MRS 1.9.2
    27  	//   MRS 2.1.0
    28  	//   MRS 3.0.2
    29  	ClusterVersion string `json:"cluster_version" required:"true"`
    30  	// Name of the VPC where the subnet is located.
    31  	// Perform the following operations to obtain the VPC name from the VPC management console:
    32  	// Log in to the management console.
    33  	// Click Virtual Private Cloud and select Virtual Private Cloud from the left list.
    34  	// On the Virtual Private Cloud page, obtain the VPC name from the list.
    35  	VpcName string `json:"vpc_name" required:"true"`
    36  	VpcId   string `json:"vpc_id,omitempty"`
    37  	// List of component names, which are separated by commas (,). The options are as follows:
    38  	// MRS 3.0.5
    39  	//   ANALYSIS: Hadoop,Spark2x,HBase,Hive,Hue,Loader,Flink,Oozie,ZooKeeper,Ranger,Tez,Impala,Presto,Kudu,Alluxio
    40  	//   STREAMING: Kafka,Storm,Flume,ZooKeeper,Ranger
    41  	//   MIXED: Hadoop,Spark2x,HBase,Hive,Hue,Loader,Flink,Oozie,ZooKeeper,Ranger,Tez,Impala,Presto,Kudu,Alluxio,
    42  	//     Kafka,Storm,Flume
    43  	//   CUSTOM: Hadoop,Spark2x,HBase,Hive,Hue,Loader,Kafka,Storm,Flume,Flink,Oozie,ZooKeeper,Ranger,Tez,Impala,
    44  	//     Presto,ClickHouse,Kudu,Alluxio
    45  	// MRS 1.9.2
    46  	//   ANALYSIS: Presto,Hadoop,Spark,HBase,Opentsdb,Hive,Hue,Loader,Tez,Flink,Alluxio,Ranger
    47  	//   STREAMING: Kafka,KafkaManager,Storm,Flume
    48  	Components string `json:"components" required:"true"`
    49  	// Node login mode.
    50  	// PASSWORD: password-based login. If this value is selected, node_root_password cannot be left blank.
    51  	// KEYPAIR: specifies the key pair used for login. If this value is selected, node_keypair_name cannot be left blank.
    52  	LoginMode string `json:"login_mode" required:"true"`
    53  	// Password of the MRS Manager administrator.
    54  	// The password can contain 8 to 26 characters.
    55  	// The password must contain lowercase letters, uppercase letters, digits, spaces
    56  	// and the special characters: !?,.:-_{}[]@$^+=/.
    57  	// The password cannot be the username or the username spelled backwards.
    58  	ManagerAdminPassword string `json:"manager_admin_password" required:"true"`
    59  	// Information about the node groups in the cluster. For details about the parameters, see Table 5.
    60  	NodeGroups []NodeGroupOpts `json:"node_groups" required:"true"`
    61  	// Running mode of an MRS cluster
    62  	// SIMPLE: normal cluster. In a normal cluster, Kerberos authentication is disabled, and users can use all functions provided by the cluster.
    63  	// KERBEROS: security cluster. In a security cluster, Kerberos authentication is enabled, and common users cannot use the file management and job management functions of an MRS cluster or view cluster resource usage and the job records of Hadoop and Spark. To use more cluster functions, the users must contact the Manager administrator to assign more permissions.
    64  	SafeMode string `json:"safe_mode" required:"true"`
    65  	// Jobs can be submitted when a cluster is created. Currently, only one job can be created. For details about job parameters, see Table 9.
    66  	AddJobs []JobOpts `json:"add_jobs,omitempty"`
    67  	// Whether to create the default security group of the MRS cluster. The default value is false.
    68  	// If true, a default security group is created for the cluster regardless of the 'security_groups_id' setting.
    69  	AutoCreateSecGroup string `json:"auto_create_default_security_group,omitempty"`
    70  	// Bootstrap action script information. For more parameter description, see Table 8.
    71  	// MRS 1.7.2 or later supports this parameter.
    72  	BootstrapScripts []ScriptOpts `json:"bootstrap_scripts,omitempty"`
    73  	// Charging type information.
    74  	ChargeInfo *ChargeInfo `json:"charge_info,omitempty"`
    75  	// Indicates the enterprise project ID.
    76  	// When creating a cluster, associate the enterprise project ID with the cluster.
    77  	// The default value is 0, indicating the default enterprise project.
    78  	// To obtain the enterprise project ID, see the id value in the enterprise_project field data structure table in section Querying the Enterprise Project List of the Enterprise Management API Reference.
    79  	EnterpriseProjectId string `json:"enterprise_project_id,omitempty"`
    80  	// An EIP bound to an MRS cluster can be used to access MRS Manager. The EIP must have been created and must be in the same region as the cluster.
    81  	EipAddress string `json:"eip_address,omitempty"`
    82  	// ID of the bound EIP. This parameter is mandatory when eip_address is configured. To obtain the EIP ID, log in to the VPC console, choose Network > Elastic IP and Bandwidth > Elastic IP, click the EIP to be bound, and obtain the ID in the Basic Information area.
    83  	EipId string `json:"eip_id,omitempty"`
    84  	// Indicates whether the cluster is a dedicated cloud resource. The default value is false.
    85  	IsDecProject *bool `json:"is_dec_project,omitempty"`
    86  	// Specifies whether to collect logs when cluster creation fails:
    87  	//   0: Do not collect.
    88  	//   1: Collect.
    89  	// The default value is 1, indicating that OBS buckets will be created and only used to collect logs that record MRS cluster creation failures.
    90  	LogCollection *int `json:"log_collection,omitempty"`
    91  	// Name of the agency bound to a cluster node by default. The value is fixed to MRS_ECS_DEFAULT_AGENCY.
    92  	// An agency allows ECS or BMS to manage MRS resources. You can configure an agency of the ECS type to automatically obtain the AK/SK to access OBS.
    93  	// The MRS_ECS_DEFAULT_AGENCY agency has the OBS OperateAccess permission of OBS and the CES FullAccess (for users who have enabled fine-grained policies), CES Administrator, and KMS Administrator permissions in the region where the cluster is located.
    94  	MrsEcsDefaultAgency string `json:"mrs_ecs_default_agency,omitempty"`
    95  	// Password of user root for logging in to a cluster node.
    96  	// The password can contain 8 to 26 characters.
    97  	// The password must contain lowercase letters, uppercase letters, digits, spaces
    98  	// and the special characters: !?,.:-_{}[]@$^+=/.
    99  	// The password cannot be the username or the username spelled backwards.
   100  	NodeRootPassword string `json:"node_root_password,omitempty"`
   101  	// Name of a key pair. You can use the key pair to log in to the Master node in the cluster.
   102  	NodeKeypair string `json:"node_keypair_name,omitempty"`
   103  	// Security group ID of the cluster
   104  	// If this parameter is left blank, MRS automatically creates a security group, whose name starts with mrs_{cluster_name}.
   105  	// If this parameter is not left blank, a fixed security group is used to create a cluster. The transferred ID must be the security group ID owned by the current tenant. The security group must include an inbound rule in which all protocols and all ports are allowed and the source is the IP address of the specified node on the management plane.
   106  	SecurityGroupsIds string `json:"security_groups_id,omitempty"`
   107  	// Subnet ID.
   108  	SubnetId string `json:"subnet_id,omitempty"`
   109  	// Subnet name.
   110  	// Required if SubnetId is empty.
   111  	SubnetName string `json:"subnet_name,omitempty"`
   112  	// Specifies the template used for node deployment when the cluster type is CUSTOM.
   113  	// mgmt_control_combined_v2: template for jointly deploying the management and control nodes. The management and control roles are co-deployed on the Master node, and data instances are deployed in the same node group. This deployment mode applies to scenarios where the number of control nodes is less than 100, reducing costs.
   114  	// mgmt_control_separated_v2: The management and control roles are deployed on different master nodes, and data instances are deployed in the same node group. This deployment mode is applicable to a cluster with 100 to 500 nodes and delivers better performance in high-concurrency load scenarios.
   115  	// mgmt_control_data_separated_v2: The management role and control role are deployed on different Master nodes, and data instances are deployed in different node groups. This deployment mode is applicable to a cluster with more than 500 nodes. Components can be deployed separately, which can be used for a larger cluster scale.
   116  	TemplateId string `json:"template_id,omitempty"`
   117  	// Cluster tags. For more parameter description, see Table 4.
   118  	// A maximum of 10 tags can be added to a cluster.
   119  	Tags []tags.ResourceTag `json:"tags,omitempty"`
   120  	// The component configurations of the MRS cluster.
   121  	ComponentConfigs []ComponentConfigOpts `json:"component_configs,omitempty"`
   122  	// When deploying components such as Hive and Ranger, you can associate data connections and store metadata in associated databases
   123  	ExternalDatasources []ExternalDatasource `json:"external_datasources,omitempty"`
   124  	// The OBS path to which cluster logs are dumped.
   125  	// This parameter is available only for cluster versions that support dumping cluster logs to OBS.
   126  	LogURI string `json:"log_uri,omitempty"`
   127  	// The alarm configuration of the cluster.
   128  	SMNNotifyConfig *SMNNotifyConfigOpts `json:"smn_notify,omitempty"`
   129  }
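        // Illustrative sketch, not part of this package's API surface: one way a
        // minimal CreateOpts for a pay-per-use analysis cluster could be assembled.
        // Every literal below (region, AZ, VPC/subnet names, component list, node
        // flavor, passwords) is a placeholder assumption, not a default of this SDK.
        func exampleAnalysisCreateOpts() CreateOpts {
        	return CreateOpts{
        		Region:               "region-01",  // placeholder region name
        		AvailabilityZone:     "region-01a", // placeholder AZ name
        		ClusterName:          "mrs-demo",
        		ClusterType:          "ANALYSIS",
        		ClusterVersion:       "MRS 3.0.2",
        		VpcName:              "vpc-demo",    // placeholder VPC name
        		SubnetName:           "subnet-demo", // placeholder subnet name
        		Components:           "Hadoop,Spark2x,Hive,ZooKeeper,Ranger", // placeholder component list
        		LoginMode:            "PASSWORD",
        		ManagerAdminPassword: "ChangeMe-123!", // placeholder, must satisfy the password rules above
        		NodeRootPassword:     "ChangeMe-123!", // placeholder, must satisfy the password rules above
        		SafeMode:             "SIMPLE",
        		NodeGroups: []NodeGroupOpts{
        			// Analysis clusters must contain the Master and analysis Core node groups.
        			{GroupName: "master_node_default_group", NodeSize: "c6.4xlarge.4.linux.bigdata", NodeNum: 2},
        			{GroupName: "core_node_analysis_group", NodeSize: "c6.4xlarge.4.linux.bigdata", NodeNum: 3},
        		},
        	}
        }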
   130  
   131  // SMNNotifyConfigOpts is a structure representing the alarm configuration information.
   132  type SMNNotifyConfigOpts struct {
   133  	// The Uniform Resource Name (URN) of the topic.
   134  	TopicURN string `json:"topic_urn" required:"true"`
   135  	// The subscription rule name.
   136  	SubscriptionName string `json:"subscription_name" required:"true"`
   137  }
   138  
   139  type ExternalDatasource struct {
   140  	ConnectorId   string `json:"connector_id,omitempty"`
   141  	ComponentName string `json:"component_name,omitempty"`
   142  	/**
   143  	Component role type. The options are as follows:
   144  		hive_metastore: Hive Metastore role
   145  		hive_data: Hive role
   146  		hbase_data: HBase role
   147  		ranger_data: Ranger role
   148  	**/
   149  	RoleType string `json:"role_type,omitempty"`
   150  	/**
   151  	Data connection type. The options are as follows:
   152  		LOCAL_DB: local metadata
   153  		RDS_POSTGRES: RDS PostgreSQL database
   154  		RDS_MYSQL: RDS MySQL database
   155  		gaussdb-mysql: GaussDB(for MySQL)
   156  	**/
   157  	SourceType string `json:"source_type,omitempty"`
   158  }
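        // Illustrative sketch: an external data source that stores Hive metadata in an
        // RDS PostgreSQL data connection, combining the role and source types documented
        // above. The connector ID is a placeholder, not a real connection.
        func exampleHiveExternalDatasource() ExternalDatasource {
        	return ExternalDatasource{
        		ComponentName: "Hive",
        		RoleType:      "hive_metastore",
        		SourceType:    "RDS_POSTGRES",
        		ConnectorId:   "00000000-0000-0000-0000-000000000000", // placeholder data connection ID
        	}
        }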
   159  
   160  // ChargeInfo is a structure representing billing information.
   161  type ChargeInfo struct {
   162  	// Billing mode.
   163  	// The valid values are as follows:
   164  	//   postPaid: indicates the pay-per-use billing mode.
   165  	//   prePaid: indicates the yearly/monthly billing mode.
   166  	ChargeMode string `json:"charge_mode" required:"true"`
   167  	// Specifies the unit of the subscription term.
   168  	// This parameter is valid and mandatory only when chargingMode is set to prePaid.
   169  	//   month: indicates that the unit is month.
   170  	//   year: indicates that the unit is year.
   171  	PeriodType string `json:"period_type,omitempty"`
   172  	// Specifies the subscription term. This parameter is valid and mandatory only when chargingMode is set to prePaid.
   173  	//   When periodType is set to month, the parameter value ranges from 1 to 9.
   174  	//   When periodType is set to year, the parameter value ranges from 1 to 3.
   175  	PeriodNum int `json:"period_num,omitempty"`
   176  	// Specifies whether to pay immediately. This parameter is valid only when chargingMode is set to prePaid. The default value is false.
   177  	//   false: indicates not to pay immediately after an order is created.
   178  	//   true: indicates to pay immediately after an order is created. The system will automatically deduct fees from the account balance.
   179  	IsAutoPay *bool `json:"is_auto_pay,omitempty"`
   180  }
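        // Illustrative sketch: a yearly/monthly (prePaid) billing configuration for a
        // one-month subscription that is paid automatically, following the constraints
        // documented above. The chosen period is only an example value.
        func examplePrePaidChargeInfo() *ChargeInfo {
        	autoPay := true
        	return &ChargeInfo{
        		ChargeMode: "prePaid", // yearly/monthly billing
        		PeriodType: "month",   // subscription unit
        		PeriodNum:  1,         // one month; the valid range for "month" is 1 to 9
        		IsAutoPay:  &autoPay,  // deduct the fee from the account balance immediately
        	}
        }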
   181  
   182  // NodeGroupOpts is a structure representing a node group.
   183  type NodeGroupOpts struct {
   184  	// Instance specifications of a node.
   185  	NodeSize string `json:"node_size" required:"true"`
   186  	// Specifies the node group name.
   187  	// The rules for configuring node groups are as follows:
   188  	//     master_node_default_group: Master node group, which must be included in all cluster types.
   189  	//     core_node_analysis_group: analysis Core node group, which must be contained in the analysis cluster and
   190  	//         hybrid cluster.
   191  	//     core_node_streaming_group: indicates the streaming Core node group, which must be included in both streaming
   192  	//         and hybrid clusters.
   193  	//     task_node_analysis_group: Analysis Task node group.
   194  	//         This node group can be selected for analysis clusters and hybrid clusters as required.
   195  	//     task_node_streaming_group: streaming Task node group.
   196  	//         This node group can be selected for streaming clusters and hybrid clusters as required.
   197  	//     node_group{x}: node group of the customized cluster. A maximum of nine node groups can be added.
   198  	//         The value can contain a maximum of 64 characters, including letters, digits and underscores (_).
   199  	GroupName string `json:"group_name" required:"true"`
   200  	// Number of nodes.
   201  	NodeNum int `json:"node_num" required:"true"`
   202  	// Specifies the system disk information of the node.
   203  	RootVolume *Volume `json:"root_volume,omitempty"`
   204  	// Data disk information.
   205  	DataVolume *Volume `json:"data_volume,omitempty"`
   206  	// Number of data disks of a node. The value range is 0 to 10.
   207  	DataVolumeCount *int `json:"data_volume_count,omitempty"`
   208  	// Billing type of the node group.
   209  	ChargeInfo *ChargeInfo `json:"charge_info,omitempty"`
   210  	// Autoscaling rule corresponding to the node group.
   211  	AsPolicy *AsPolicy `json:"auto_scaling_policy,omitempty"`
   212  	// This parameter is mandatory when the cluster type is CUSTOM. Specifies the roles deployed in a node group.
   213  	// This parameter is a character string array. Each character string represents a role expression.
   214  	// Role expression definition:
   215  	//     If the role is deployed on all nodes in the node group, set this parameter to <role name>, e.g. DataNode.
   216  	//     If the role is deployed on a specified subscript node in the node group:
   217  	//         <role name>:<index1>,<index2>..., <indexN>, e.g. NameNode:1,2.
   218  	//     Some roles support multi-instance deployment (that is, multiple instances of the same role are deployed on a
   219  	//         node): <role name>[<instance count>], for example, EsNode[9].
   220  	AssignedRoles []string `json:"assigned_roles,omitempty"`
   221  }
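        // Illustrative sketch: a Master node group for a CUSTOM cluster showing the
        // three role expression forms described above (plain role name, indexed role,
        // multi-instance role). The flavor, volume sizes and role names are placeholder
        // assumptions; the roles actually available depend on the selected components.
        func exampleCustomMasterNodeGroup() NodeGroupOpts {
        	dataVolumeCount := 1
        	return NodeGroupOpts{
        		GroupName:       "master_node_default_group",
        		NodeSize:        "c6.4xlarge.4.linux.bigdata", // placeholder flavor
        		NodeNum:         3,
        		RootVolume:      &Volume{Type: "SAS", Size: 480},
        		DataVolume:      &Volume{Type: "SAS", Size: 600},
        		DataVolumeCount: &dataVolumeCount,
        		AssignedRoles: []string{
        			"DataNode",     // deployed on every node of the group
        			"NameNode:1,2", // deployed only on the nodes with subscripts 1 and 2
        			"EsNode[2]",    // multi-instance deployment: two instances per node
        		},
        	}
        }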
   222  
   223  // Volume is a structure representing node volume configurations.
   224  type Volume struct {
   225  	// Disk type. The following disk types are supported:
   226  	//     SATA: common I/O disk
   227  	//     SAS: high I/O disk
   228  	//     SSD: ultra-high I/O disk
   229  	Type string `json:"type" required:"true"`
   230  	// Specifies the data disk size, in GB. The value range is 10 to 32768.
   231  	Size int `json:"size" required:"true"`
   232  }
   233  
   234  // AsPolicy is a structure representing auto-scaling policy for task nodes.
   235  type AsPolicy struct {
   236  	// Whether to enable the auto scaling rule.
   237  	Enabled string `json:"auto_scaling_enable" required:"true"`
   238  	// Minimum number of nodes left in the node group. The value range is 0 to 500.
   239  	MinCapacity int `json:"min_capacity" required:"true"`
   240  	// Maximum number of nodes in the node group. The value range is 0 to 500.
   241  	MaxCapacity int `json:"max_capacity" required:"true"`
   242  	// List of resource plans.
   243  	ResourcesPlans []ResourcesPlan `json:"resources_plans,omitempty"`
   244  	// List of auto scaling rules.
   245  	Rules []Rule `json:"rules,omitempty"`
   246  	// List of custom scaling automation scripts.
   247  	ExecScripts []ScaleScript `json:"exec_scripts,omitempty"`
   248  }
   249  
   250  // ResourcesPlan is a structure representing a resource plan of the policy.
   251  type ResourcesPlan struct {
   252  	// Cycle type of a resource plan.
   253  	PeriodType string `json:"period_type" required:"true"`
   254  	// Start time of a resource plan.
   255  	// The value is in the format of hour:minute, indicating that the time ranges from 0:00 to 23:59.
   256  	StartTime string `json:"start_time" required:"true"`
   257  	// End time of a resource plan. The value is in the same format as that of start_time.
   258  	// The interval between end_time and start_time must be greater than or equal to 30 minutes.
   259  	EndTime string `json:"end_time" required:"true"`
   260  	// Minimum number of the preserved nodes in a node group in a resource plan. The value range is 0 to 500.
   261  	MinCapacity int `json:"min_capacity" required:"true"`
   262  	// Maximum number of the preserved nodes in a node group in a resource plan. The value range is 0 to 500.
   263  	MaxCapacity int `json:"max_capacity" required:"true"`
   264  }
   265  
   266  // Rule is a structure representing configuration of the auto-scaling rule.
   267  type Rule struct {
   268  	// Auto scaling rule adjustment type. The options are scale_out and scale_in.
   269  	AdjustmentType string `json:"adjustment_type" required:"true"`
   270  	// Cluster cool-down time after an auto scaling rule is triggered, during which no auto scaling operation is performed.
   271  	// The unit is minute.
   272  	CoolDownMinutes int `json:"cool_down_minutes" required:"true"`
   273  	// Unique name of an auto scaling rule. The name can contain only 1 to 64 characters.
   274  	// Only letters, digits, hyphens (-), and underscores (_) are allowed.
   275  	Name string `json:"name" required:"true"`
   276  	// Number of nodes that can be adjusted once. The value range is 1 to 100.
   277  	ScalingAdjustment int `json:"scaling_adjustment" required:"true"`
   278  	// Condition for triggering a rule.
   279  	Trigger Trigger `json:"trigger" required:"true"`
   280  	// Description about an auto scaling rule. It contains a maximum of 1,024 characters.
   281  	Description *string `json:"description,omitempty"`
   282  }
   283  
   284  // Trigger is a structure representing the condition for triggering a rule.
   285  type Trigger struct {
   286  	// Number of consecutive five-minute periods, during which a metric threshold is reached.
   287  	// The value range is 1 to 288.
   288  	EvaluationPeriods int `json:"evaluation_periods" required:"true"`
   289  	// Metric name.
   290  	MetricName string `json:"metric_name" required:"true"`
   291  	// Metric threshold to trigger a rule.
   292  	MetricValue string `json:"metric_value" required:"true"`
   293  	// Metric judgment logic operator. The options are LT, GT, LTOE and GTOE.
   294  	ComparisonOperator string `json:"comparison_operator,omitempty"`
   295  }
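        // Illustrative sketch: an auto scaling policy for a Task node group that keeps
        // extra nodes during working hours and adds one node when a metric stays below a
        // threshold. The metric name, threshold and plan times are placeholder
        // assumptions, not values validated by this package.
        func exampleTaskNodeAsPolicy() *AsPolicy {
        	description := "scale out when available memory stays low"
        	return &AsPolicy{
        		Enabled:     "true",
        		MinCapacity: 0,
        		MaxCapacity: 10,
        		ResourcesPlans: []ResourcesPlan{
        			{
        				PeriodType:  "daily", // placeholder cycle type
        				StartTime:   "09:00",
        				EndTime:     "18:00",
        				MinCapacity: 2,
        				MaxCapacity: 10,
        			},
        		},
        		Rules: []Rule{
        			{
        				Name:              "default-expand-1",
        				AdjustmentType:    "scale_out",
        				CoolDownMinutes:   5,
        				ScalingAdjustment: 1,
        				Description:       &description,
        				Trigger: Trigger{
        					MetricName:         "YARNMemoryAvailablePercentage", // placeholder metric name
        					MetricValue:        "25",
        					ComparisonOperator: "LT",
        					EvaluationPeriods:  10,
        				},
        			},
        		},
        	}
        }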
   296  
   297  // ScriptOpts is a structure representing the bootstrap action script information.
   298  type ScriptOpts struct {
   299  	// Whether to continue executing subsequent scripts and creating a cluster after the bootstrap action script fails to be executed.
   300  	// continue: Continue to execute subsequent scripts.
   301  	// errorout: Stop the action.
   302  	// The default value is errorout, indicating that the action is stopped.
   303  	// NOTE:
   304  	// You are advised to set this parameter to continue in the commissioning phase so that the cluster can continue to be installed and started no matter whether the bootstrap action is successful.
   305  	FailAction string `json:"fail_action" required:"true"`
   306  	// Name of a bootstrap action script. It must be unique in a cluster.
   307  	// The value can contain only digits, letters, spaces, hyphens (-), and underscores (_) and must not start with a space.
   308  	// The value can contain 1 to 64 characters.
   309  	Name string `json:"name" required:"true"`
   310  	// Type of a node where the bootstrap action script is executed. The value can be Master, Core, or Task.
   311  	Nodes []string `json:"nodes" required:"true"`
   312  	// Bootstrap action script parameters.
   313  	Parameters string `json:"parameters,omitempty"`
   314  	// Path of a bootstrap action script. Set this parameter to an OBS bucket path or a local VM path.
   315  	// OBS bucket path: Enter a script path manually. For example, enter the path of the public sample script provided by MRS. Example: s3a://bootstrap/presto/presto-install.sh. If dualroles is installed, the parameter of the presto-install.sh script is dualroles. If worker is installed, the parameter of the presto-install.sh script is worker. Based on the Presto usage habit, you are advised to install dualroles on the active Master nodes and worker on the Core nodes.
   316  	// Local VM path: Enter a script path. The script path must start with a slash (/) and end with .sh.
   317  	URI string `json:"uri" required:"true"`
   318  	// Whether the bootstrap action script runs only on active Master nodes.
   319  	// The default value is false, indicating that the bootstrap action script can run on all Master nodes.
   320  	ActiveMaster *bool `json:"active_master,omitempty"`
   321  	// Time when the bootstrap action script is executed. Currently, the following two options are available: Before component start and After component start
   322  	// The default value is false, indicating that the bootstrap action script is executed after the component is started.
   323  	BeforeComponentStart *bool `json:"before_component_start,omitempty"`
   324  	ExecuteNeedSudoRoot  *bool `json:"execute_need_sudo_root,omitempty"`
   325  }
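        // Illustrative sketch: a bootstrap action that runs an OBS-hosted script on the
        // Master and Core nodes before the components start and tolerates failures. The
        // script path, parameters and node type values are placeholder assumptions.
        func exampleBootstrapScript() ScriptOpts {
        	beforeStart := true
        	return ScriptOpts{
        		Name:                 "install-demo-agent",
        		URI:                  "s3a://bootstrap/demo/install.sh", // placeholder OBS path
        		Parameters:           "dualroles",
        		Nodes:                []string{"Master", "Core"},
        		FailAction:           "continue",
        		BeforeComponentStart: &beforeStart,
        	}
        }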
   326  
   327  type ComponentConfigOpts struct {
   328  	// The name of a component installed in the MRS cluster.
   329  	Name    string       `json:"component_name" required:"true"`
   330  	Configs []ConfigOpts `json:"configs" required:"true"`
   331  }
   332  type ConfigOpts struct {
   333  	// The configuration item key of the installed component.
   334  	Key string `json:"key" required:"true"`
   335  	// The configuration item value of the installed component.
   336  	Value string `json:"value" required:"true"`
   337  	// The configuration file name of the installed component.
   338  	ConfigFileName string `json:"config_file_name" required:"true"`
   339  }
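        // Illustrative sketch: overriding a single HDFS configuration item through the
        // component configuration structures above. The component name, key, value and
        // file name are placeholders, not recommended settings.
        func exampleComponentConfig() ComponentConfigOpts {
        	return ComponentConfigOpts{
        		Name: "HDFS",
        		Configs: []ConfigOpts{
        			{
        				Key:            "dfs.replication",
        				Value:          "3",
        				ConfigFileName: "hdfs-site.xml",
        			},
        		},
        	}
        }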
   340  
   341  // JobOpts is a structure representing the job to be executed.
   342  type JobOpts struct {
   343  	// SQL program path. This parameter is needed by Spark Script and Hive Script jobs only, and must meet the following requirements:
   344  	// Contains a maximum of 1,023 characters, excluding special characters such as ;|&><'$. The address cannot be empty or full of spaces.
   345  	// Files can be stored in HDFS or OBS. The path varies depending on the file system.
   346  	// OBS: The path must start with s3a://. Files or programs encrypted by KMS are not supported.
   347  	// HDFS: The path starts with a slash (/).
   348  	// Ends with .sql. sql is case-insensitive.
   349  	HiveScriptPath string `json:"hive_script_path" required:"true"`
   350  	// Job name. It contains 1 to 64 characters. Only letters, digits, hyphens (-), and underscores (_) are allowed.
   351  	// NOTE:
   352  	// Identical job names are allowed but not recommended.
   353  	JobName string `json:"job_name" required:"true"`
   354  	// Job type code
   355  	//   1: MapReduce
   356  	//   2: Spark
   357  	//   3: Hive Script
   358  	//   4: HiveQL (not supported currently)
   359  	//   5: DistCp, importing and exporting data (not supported currently)
   360  	//   6: Spark Script
   361  	//   7: Spark SQL, submitting Spark SQL statements (not supported currently).
   362  	// NOTE:
   363  	// Spark and Hive jobs can be added to only clusters that include Spark and Hive components.
   364  	JobType int `json:"job_type" required:"true"`
   365  	// true: Submit a job during cluster creation.
   366  	// false: Submit a job after the cluster is created.
   367  	// Set this parameter to true in this example.
   368  	SubmitJobOnceClusterRun bool `json:"submit_job_once_cluster_run" required:"true"`
   369  	// Key parameter for program execution. The parameter is specified by the function of the user's program. MRS is only responsible for loading the parameter.
   370  	// The parameter contains a maximum of 2,047 characters, excluding special characters such as ;|&>'<$, and can be left blank.
   371  	Arguments string `json:"arguments,omitempty"`
   372  	// Data import and export
   373  	// import
   374  	// export
   375  	FileAction string `json:"file_action,omitempty"`
   376  	// HiveQL statement
   377  	Hql string `json:"hql,omitempty"`
   378  	// Address for inputting data
   379  	// Files can be stored in HDFS or OBS. The path varies depending on the file system.
   380  	//   OBS: The path must start with s3a://. Files or programs encrypted by KMS are not supported.
   381  	//   HDFS: The path starts with a slash (/).
   382  	// The parameter contains a maximum of 1,023 characters, excluding special characters such as ;|&>'<$, and can be left blank.
   383  	Input string `json:"input,omitempty"`
   384  	// Path of the JAR or SQL file for program execution. The parameter must meet the following requirements:
   385  	// Contains a maximum of 1,023 characters, excluding special characters such as ;|&><'$. The parameter value cannot be empty or full of spaces.
   386  	// Files can be stored in HDFS or OBS. The path varies depending on the file system.
   387  	//   OBS: The path must start with s3a://. Files or programs encrypted by KMS are not supported.
   388  	//   HDFS: The path starts with a slash (/).
   389  	// Spark Script must end with .sql while MapReduce and Spark Jar must end with .jar. sql and jar are case-insensitive.
   390  	JarPath string `json:"jar_path,omitempty"`
   391  	// Path for storing job logs that record job running status.
   392  	// Files can be stored in HDFS or OBS. The path varies depending on the file system.
   393  	//   OBS: The path must start with s3a://.
   394  	//   HDFS: The path starts with a slash (/).
   395  	// The parameter contains a maximum of 1,023 characters, excluding special characters such as ;|&>'<$, and can be left blank.
   396  	JobLog string `json:"job_log,omitempty"`
   397  	// Address for outputting data
   398  	// Files can be stored in HDFS or OBS. The path varies depending on the file system.
   399  	//   OBS: The path must start with s3a://.
   400  	//   HDFS: The path starts with a slash (/).
   401  	// If the specified path does not exist, the system will automatically create it.
   402  	// The parameter contains a maximum of 1,023 characters, excluding special characters such as ;|&>'<$, and can be left blank.
   403  	Output string `json:"output,omitempty"`
   404  	// Whether to delete the cluster after the job execution is complete.
   405  	ShutdownCluster bool `json:"shutdown_cluster,omitempty"`
   406  }
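        // Illustrative sketch: a Hive Script job submitted while the cluster is being
        // created. The OBS paths are placeholders and must point to existing objects.
        func exampleHiveScriptJob() JobOpts {
        	return JobOpts{
        		JobName:                 "demo-hive-script",
        		JobType:                 3, // Hive Script
        		SubmitJobOnceClusterRun: true,
        		HiveScriptPath:          "s3a://demo-bucket/scripts/query.sql", // placeholder OBS path
        		JobLog:                  "s3a://demo-bucket/logs/",             // placeholder OBS path
        	}
        }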
   407  
   408  // ScaleScript is a structure representing a custom automation script of the auto-scaling policy.
   409  type ScaleScript struct {
   410  	// Unique name of a custom automation script. The value can contain 1 to 64 characters.
   411  	// The value can contain digits, letters, spaces, hyphens (-), and underscores (_) and must not start with a space.
   412  	Name string `json:"name" required:"true"`
   413  	// Path of a custom automation script. Set this parameter to an OBS bucket path or a local VM path.
   414  	//     OBS bucket path: Enter a script path manually. For example, s3a://XXX/scale.sh.
   415  	//     Local VM path: Enter a script path. The script path must start with a slash (/) and end with .sh.
   416  	URI string `json:"uri" required:"true"`
   417  	// Type of a node where the custom automation script is executed. The node type can be Master, Core, or Task.
   418  	Nodes []string `json:"nodes" required:"true"`
   419  	// Time when a script is executed. The following four options are supported:
   420  	//     before_scale_out: before scale-out
   421  	//     before_scale_in: before scale-in
   422  	//     after_scale_out: after scale-out
   423  	//     after_scale_in: after scale-in
   424  	ActionStage string `json:"action_stage" required:"true"`
   425  	// Whether to continue to execute subsequent scripts and create a cluster after the custom automation script fails
   426  	// to be executed.
   427  	//     continue: Continue to execute subsequent scripts.
   428  	//     errorout: Stop the action.
   429  	FailAction string `json:"fail_action" required:"true"`
   430  	// Parameters of a custom automation script. Multiple parameters are separated by space.
   431  	// The following predefined system parameters can be transferred:
   432  	//     ${mrs_scale_node_num}: Number of the nodes to be added or removed.
   433  	//     ${mrs_scale_type}: Scaling type. The value can be scale_out or scale_in.
   434  	//     ${mrs_scale_node_hostnames}: Host names of the nodes to be added or removed.
   435  	//     ${mrs_scale_node_ips}: IP addresses of the nodes to be added or removed.
   436  	//     ${mrs_scale_rule_name}: Name of the rule that triggers auto scaling.
   437  	// Other user-defined parameters are used in the same way as those of common shell scripts.
   438  	Parameters string `json:"parameters,omitempty"`
   439  	// Whether the custom automation script runs only on the active Master node.
   440  	ActiveMaster bool `json:"active_master,omitempty"`
   441  }
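        // Illustrative sketch: a custom automation script that runs on Task nodes after a
        // scale-out and receives the predefined host name parameter. The OBS path and
        // node type value are placeholder assumptions.
        func examplePostScaleOutScript() ScaleScript {
        	return ScaleScript{
        		Name:        "refresh-node-list",
        		URI:         "s3a://demo-bucket/scripts/scale.sh", // placeholder OBS path
        		Nodes:       []string{"Task"},
        		ActionStage: "after_scale_out",
        		FailAction:  "continue",
        		Parameters:  "${mrs_scale_node_hostnames}",
        	}
        }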
   442  
   443  // CreateOptsBuilder is an interface used to build the request body for cluster creation.
   444  type CreateOptsBuilder interface {
   445  	ToClusterCreateMap() (map[string]interface{}, error)
   446  }
   447  
   448  // ToClusterCreateMap builds the cluster creation request body from the CreateOpts.
   449  func (opts CreateOpts) ToClusterCreateMap() (map[string]interface{}, error) {
   450  	return golangsdk.BuildRequestBody(opts, "")
   451  }
   452  
   453  // Create is a method to create a new MapReduce (MRS) cluster.
   454  func Create(client *golangsdk.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
   455  	b, err := opts.ToClusterCreateMap()
   456  	if err != nil {
   457  		r.Err = err
   458  		return
   459  	}
   460  
   461  	reqOpt := &golangsdk.RequestOpts{OkCodes: []int{200}}
   462  	_, r.Err = client.Post(rootURL(client), b, &r.Body, reqOpt)
   463  	return
   464  }
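        // Illustrative sketch: how Create could be called with an already authenticated
        // MRS v2 service client. Obtaining the client (for example through the
        // provider/client helpers elsewhere in this SDK) is outside the scope of this
        // file, so it is taken as a parameter here.
        func exampleCreateCluster(client *golangsdk.ServiceClient) error {
        	opts := exampleAnalysisCreateOpts() // sketch shown after CreateOpts above
        	r := Create(client, opts)
        	if r.Err != nil {
        		return r.Err
        	}
        	// The created cluster information can be extracted with the helpers in results.go.
        	return nil
        }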