github.com/huaweicloud/golangsdk@v0.0.0-20210831081626-d823fe11ceba/openstack/mrs/v2/jobs/requests.go

package jobs

import (
	"github.com/huaweicloud/golangsdk"
	"github.com/huaweicloud/golangsdk/pagination"
)

// CreateOpts is a structure representing information of the job creation.
type CreateOpts struct {
	// Type of a job, and the valid values are as follows:
	//     MapReduce
	//     SparkSubmit
	//     HiveScript
	//     HiveSql
	//     DistCp (importing and exporting data)
	//     SparkScript
	//     SparkSql
	//     Flink
	// NOTE:
	// Spark, Hive, and Flink jobs can be added only to clusters that include the corresponding Spark, Hive, and
	// Flink components.
	JobType string `json:"job_type" required:"true"`
	// Job name. It contains 1 to 64 characters. Only letters, digits, hyphens (-), and underscores (_) are allowed.
	// NOTE:
	// Identical job names are allowed but not recommended.
	JobName string `json:"job_name" required:"true"`
	// Key parameters for program execution. The parameters are specified by the functions of the user's program;
	// MRS is only responsible for loading them.
	// Each parameter contains a maximum of 4,096 characters, excluding special characters such as ;|&>'<$,
	// and can be left blank.
	// NOTE:
	// If you enter a parameter with sensitive information (such as a login password), the parameter may be exposed
	// in the job details display and log printing. Exercise caution when performing this operation.
	// For MRS 1.9.2 or later, a file path on OBS can start with obs://. To use this format to submit HiveScript or
	// HiveSQL jobs, choose Components > Hive > Service Configuration on the cluster details page, set Type to All,
	// and search for core.site.customized.configs. Add the OBS endpoint configuration item (fs.obs.endpoint) and
	// enter the endpoint corresponding to OBS in Value. Obtain the value from Regions and Endpoints.
	// For MRS 3.0.2 or later, a file path on OBS can start with obs://. To use this format to submit HiveScript or
	// HiveSQL jobs, choose Components > Hive > Service Configuration on Manager. Switch Basic to All, and search
	// for core.site.customized.configs. Add the OBS endpoint configuration item (fs.obs.endpoint) and enter the
	// endpoint corresponding to OBS in Value. Obtain the value from Regions and Endpoints.
	Arguments []string `json:"arguments,omitempty"`
	// Program system parameters.
	// Each parameter contains a maximum of 2,048 characters, excluding special characters such as ><|'`&!\, and
	// can be left blank.
	Properties map[string]string `json:"properties,omitempty"`
}

// CreateOptsBuilder is an interface which supports building the request body of the job creation operation.
type CreateOptsBuilder interface {
	ToJobCreateMap() (map[string]interface{}, error)
}

// ToJobCreateMap is a method used to build the request body from a CreateOpts structure.
func (opts CreateOpts) ToJobCreateMap() (map[string]interface{}, error) {
	return golangsdk.BuildRequestBody(opts, "")
}

// Create is a method to create a new MapReduce job.
func Create(client *golangsdk.ServiceClient, clusterId string, opts CreateOptsBuilder) (r CreateResult) {
	reqBody, err := opts.ToJobCreateMap()
	if err != nil {
		r.Err = err
		return
	}
	_, r.Err = client.Post(rootURL(client, clusterId), reqBody, &r.Body, &golangsdk.RequestOpts{
		OkCodes: []int{200},
	})
	return
}
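// exampleCreateJob is a usage sketch added for illustration; it is not part of
// the original SDK surface. It shows how CreateOpts and Create are expected to
// be combined. The job type, job name, cluster ID, and OBS JAR path below are
// hypothetical placeholders.
func exampleCreateJob(client *golangsdk.ServiceClient, clusterId string) error {
	opts := CreateOpts{
		JobType: "SparkSubmit",
		JobName: "wordcount_demo",
		// Hypothetical program arguments; MRS only loads them, and their
		// meaning is defined by the submitted program itself.
		Arguments: []string{
			"--class", "com.example.WordCount",
			"obs://demo-bucket/jars/wordcount.jar",
		},
	}
	// Create posts the body built by ToJobCreateMap and treats HTTP 200 as
	// success; any build or request error is carried on the result's Err field.
	return Create(client, clusterId, opts).Err
}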
// Get is a method to get an existing MapReduce job by cluster ID and job ID.
func Get(client *golangsdk.ServiceClient, clusterId, jobId string) (r GetResult) {
	_, r.Err = client.Get(resourceURL(client, clusterId, jobId), &r.Body, nil)
	return
}

// ListOpts is a structure representing information of the job list operation.
type ListOpts struct {
	// Job name. It contains 1 to 64 characters. Only letters, digits, hyphens (-), and underscores (_) are allowed.
	JobName string `q:"job_name"`
	// Type of a job, and the valid values are as follows:
	//     MapReduce
	//     SparkSubmit
	//     HiveScript
	//     HiveSql
	//     DistCp (importing and exporting data)
	//     SparkScript
	//     SparkSql
	//     Flink
	JobType string `q:"job_type"`
	// Execution status of a job.
	//     FAILED: indicates that the job fails to be executed.
	//     KILLED: indicates that the job is terminated.
	//     New: indicates that the job is created.
	//     NEW_SAVING: indicates that the job has been created and is being saved.
	//     SUBMITTED: indicates that the job is submitted.
	//     ACCEPTED: indicates that the job is accepted.
	//     RUNNING: indicates that the job is running.
	//     FINISHED: indicates that the job is completed.
	JobState string `q:"job_state"`
	// Execution result of a job.
	//     FAILED: indicates that the job fails to be executed.
	//     KILLED: indicates that the job is manually terminated during execution.
	//     UNDEFINED: indicates that the job is being executed.
	//     SUCCEEDED: indicates that the job has been successfully executed.
	JobResult string `q:"job_result"`
	// Number of records displayed on each page in the returned result. The default value is 10.
	Limit int `q:"limit"`
	// Offset from which the job list starts to be queried. The default value is 1.
	Offset int `q:"offset"`
	// Ranking mode of returned results. The default value is desc.
	//     asc: indicates that the returned results are ranked in ascending order.
	//     desc: indicates that the returned results are ranked in descending order.
	SortBy string `q:"sort_by"`
	// UTC timestamp after which a job is submitted, in milliseconds. For example, 1562032041362.
	SubmittedTimeBegin int `q:"submitted_time_begin"`
	// UTC timestamp before which a job is submitted, in milliseconds. For example, 1562032041362.
	SubmittedTimeEnd int `q:"submitted_time_end"`
}

// ListOptsBuilder is an interface which supports building the request query of the job list operation.
type ListOptsBuilder interface {
	ToListQuery() (string, error)
}

// ToListQuery is a method used to build the request query string from a ListOpts structure.
func (opts ListOpts) ToListQuery() (string, error) {
	q, err := golangsdk.BuildQueryString(opts)
	if err != nil {
		return "", err
	}
	return q.String(), nil
}

// List is a method to obtain an array of one or more MapReduce jobs according to the query parameters.
func List(client *golangsdk.ServiceClient, clusterId string, opts ListOptsBuilder) pagination.Pager {
	url := rootURL(client, clusterId)
	if opts != nil {
		query, err := opts.ToListQuery()
		if err != nil {
			return pagination.Pager{Err: err}
		}
		url += query
	}

	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
		return JobPage{pagination.SinglePageBase(r)}
	})
}
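// exampleListRunningJobs is a usage sketch added for illustration; it is not
// part of the original SDK surface. It filters for running SparkSubmit jobs;
// the filter values are hypothetical. Converting the returned page into a job
// slice is assumed to be handled by an extraction helper in this package's
// responses.go (not shown here).
func exampleListRunningJobs(client *golangsdk.ServiceClient, clusterId string) error {
	opts := ListOpts{
		JobType:  "SparkSubmit",
		JobState: "RUNNING",
		Limit:    20,
	}
	// Because JobPage is backed by SinglePageBase, AllPages resolves the pager
	// with a single request.
	_, err := List(client, clusterId, opts).AllPages()
	return err
}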
// DeleteOpts is a structure representing information of the job delete operation.
type DeleteOpts struct {
	// A list of IDs of the jobs to be deleted.
	JobIds []string `json:"job_id_list,omitempty"`
}

// DeleteOptsBuilder is an interface which supports building the request body of the job delete operation.
type DeleteOptsBuilder interface {
	ToJobDeleteMap() (map[string]interface{}, error)
}

// ToJobDeleteMap is a method used to build the request body from a DeleteOpts structure.
func (opts DeleteOpts) ToJobDeleteMap() (map[string]interface{}, error) {
	return golangsdk.BuildRequestBody(opts, "")
}

// Delete is a method to delete one or more existing MapReduce jobs.
func Delete(client *golangsdk.ServiceClient, clusterId string, opts DeleteOptsBuilder) (r DeleteResult) {
	reqBody, err := opts.ToJobDeleteMap()
	if err != nil {
		r.Err = err
		return
	}
	_, r.Err = client.Post(deleteURL(client, clusterId), reqBody, nil, nil)
	return
}
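// exampleDeleteJobs is a usage sketch added for illustration; it is not part
// of the original SDK surface. It removes a batch of jobs by ID; the job IDs
// are hypothetical placeholders.
func exampleDeleteJobs(client *golangsdk.ServiceClient, clusterId string) error {
	opts := DeleteOpts{
		JobIds: []string{"hypothetical-job-id-1", "hypothetical-job-id-2"},
	}
	// Delete posts the ID list to the batch-delete endpoint and parses no
	// response body; any error is carried on the result's Err field.
	return Delete(client, clusterId, opts).Err
}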