github.com/opensearch-project/opensearch-go/v2@v2.3.0/opensearchapi/opensearchapi_integration_test.go

// SPDX-License-Identifier: Apache-2.0
//
// The OpenSearch Contributors require contributions made to
// this file be licensed under the Apache-2.0 license or a
// compatible open source license.
//
// Modifications Copyright OpenSearch Contributors. See
// GitHub history for details.

// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// +build integration

package opensearchapi_test

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/opensearch-project/opensearch-go/v2"
	"github.com/opensearch-project/opensearch-go/v2/opensearchapi"
)

// createTestIndex bulk-indexes 1000 small documents into the given index and refreshes it.
func createTestIndex(client *opensearch.Client, index string) error {
	var buf bytes.Buffer
	// Index data
	//
	for j := 1; j <= 1000; j++ {
		meta := []byte(fmt.Sprintf(`{ "index" : { "_id" : "%d" } }%s`, j, "\n"))
		data := []byte(`{"content":"` + strings.Repeat("ABC", 100) + `"}`)
		data = append(data, "\n"...)

		buf.Grow(len(meta) + len(data))
		buf.Write(meta)
		buf.Write(data)
	}
	_, err := client.Bulk(bytes.NewReader(buf.Bytes()), client.Bulk.WithIndex(index), client.Bulk.WithRefresh("true"))
	return err
}

func TestAPI(t *testing.T) {
	t.Run("Search", func(t *testing.T) {
		client, err := opensearch.NewDefaultClient()
		if err != nil {
			t.Fatalf("Error creating the client: %s\n", err)
		}

		client.Cluster.Health(client.Cluster.Health.WithWaitForStatus("yellow"))
		res, err := client.Search(client.Search.WithTimeout(500 * time.Millisecond))
		if err != nil {
			t.Fatalf("Error getting the response: %s\n", err)
		}
		defer res.Body.Close()

		if res.IsError() {
			t.Fatalf("Error response: %s", res.String())
		}

		var d map[string]interface{}
		err = json.NewDecoder(res.Body).Decode(&d)
		if err != nil {
			t.Fatalf("Error parsing the response: %s\n", err)
		}
		fmt.Printf("took=%vms\n", d["took"])
	})

	t.Run("Headers", func(t *testing.T) {
		client, err := opensearch.NewDefaultClient()
		if err != nil {
			t.Fatalf("Error creating the client: %s\n", err)
		}

		res, err := client.Info(client.Info.WithHeader(map[string]string{"Accept": "application/yaml"}))
		if err != nil {
			t.Fatalf("Error getting the response: %s\n", err)
		}
		defer res.Body.Close()

		if res.IsError() {
			t.Fatalf("Error response: %s", res.String())
		}

		if !strings.HasPrefix(res.String(), "[200 OK] ---") {
			t.Errorf("Unexpected response body: doesn't start with '[200 OK] ---'; %s", res.String())
		}
	})

	t.Run("OpaqueID", func(t *testing.T) {
		var (
			res *opensearchapi.Response
			err error

			requestID = "reindex-123"
		)

		client, err := opensearch.NewDefaultClient()
		if err != nil {
			t.Fatalf("Error creating the client: %s\n", err)
		}

		// Prepare indices
		//
		client.Indices.Delete([]string{"test", "reindexed"}, client.Indices.Delete.WithIgnoreUnavailable(true))

		// Index data
		//
		err = createTestIndex(client, "test")
		if err != nil {
			t.Fatalf("Failed to index data: %s", err)
		}

		// Launch reindexing task with wait_for_completion=false
		//
		res, err = client.Reindex(
			strings.NewReader(`{"source":{"index":"test"}, "dest": {"index":"reindexed"}}`),
			client.Reindex.WithWaitForCompletion(false),
			client.Reindex.WithRequestsPerSecond(1),
			client.Reindex.WithOpaqueID(requestID))
		if err != nil {
			t.Fatalf("Failed to reindex: %s", err)
		}
		if res.IsError() {
			t.Fatalf("Failed to reindex: %s", res.Status())
		}
		time.Sleep(10 * time.Millisecond)

		res, err = client.Tasks.List(client.Tasks.List.WithPretty())
		if err != nil {
			t.Fatalf("ERROR: %s", err)
		}
		res.Body.Close()
		if res.IsError() {
			t.Fatalf("Failed to get tasks: %s", res.Status())
		}

		// Get the list of tasks
		//
		res, err = client.Tasks.List(client.Tasks.List.WithPretty())
		if err != nil {
			t.Fatalf("ERROR: %s", err)
		}
		defer res.Body.Close()

		if res.IsError() {
			t.Fatalf("Failed to get tasks: %s", res.Status())
		}

		type task struct {
			Node        string
			ID          int
			Action      string
			RunningTime time.Duration `json:"running_time_in_nanos"`
			Cancellable bool
			Headers     map[string]interface{}
		}

		type node struct {
			Tasks map[string]task
		}

		var nodes map[string]map[string]node
		if err := json.NewDecoder(res.Body).Decode(&nodes); err != nil {
			t.Fatalf("Failed to decode response: %s", err)
		}

		var hasReindexTask bool

		for _, n := range nodes["nodes"] {
			for taskID, task := range n.Tasks {
				if task.Headers["X-Opaque-Id"] == requestID {
					if strings.Contains(task.Action, "reindex") {
						hasReindexTask = true
					}
					fmt.Printf("* %s, %s | %s (%s)\n", requestID, taskID, task.Action, task.RunningTime)
				}
			}
		}

		if !hasReindexTask {
			t.Errorf("Expected reindex task in %+v", nodes["nodes"])
		}

		for _, n := range nodes["nodes"] {
			for taskID, task := range n.Tasks {
				if task.Headers["X-Opaque-Id"] == requestID {
					if task.Cancellable {
						fmt.Printf("=> Closing task %s\n", taskID)
						res, err = client.Tasks.Cancel(client.Tasks.Cancel.WithTaskID(taskID))
						if err != nil {
							t.Fatalf("ERROR: %s", err)
						}
						res.Body.Close()
						if res.IsError() {
							t.Fatalf("Failed to cancel task: %s", res)
						}
					}
				}
			}
		}
	})
	t.Run("Snapshot", func(t *testing.T) {
		// Function to perform requests
		//
		opensearchDo := func(ctx context.Context, client *opensearch.Client, req opensearchapi.Request, msg string, t *testing.T) {
			_, err := req.Do(ctx, client)
			if err != nil {
				t.Fatalf("Error performing the request (%s): %s\n", msg, err)
			}
		}

		// Create Client
		//
		client, err := opensearch.NewDefaultClient()
		if err != nil {
			t.Fatalf("Error creating the client: %s\n", err)
		}

		// Pre Cleanup indices
		//
		iDeleteReq := &opensearchapi.IndicesDeleteRequest{
			Index:             []string{"test", "test_restored"},
			IgnoreUnavailable: opensearchapi.BoolPtr(true),
		}
		ctx := context.Background()
		opensearchDo(ctx, client, iDeleteReq, "delete indices", t)

		// Index data
		//
		err = createTestIndex(client, "test")
		if err != nil {
			t.Fatalf("Failed to index data: %s", err)
		}

		// Test Snapshot functions
		//
		sRepoCreateReq := &opensearchapi.SnapshotCreateRepositoryRequest{
			Body:       bytes.NewBufferString(`{"type":"fs","settings":{"location":"/usr/share/opensearch/mnt"}}`),
			Repository: "snapshot-test",
		}
		opensearchDo(ctx, client, sRepoCreateReq, "create Snapshot Repository", t)

		sRepoVerifyReq := &opensearchapi.SnapshotVerifyRepositoryRequest{
			Repository: "snapshot-test",
		}
		opensearchDo(ctx, client, sRepoVerifyReq, "verify Snapshot Repository", t)

		sDeleteReq := &opensearchapi.SnapshotDeleteRequest{
			Snapshot:   []string{"test", "clone-test"},
			Repository: "snapshot-test",
		}
		opensearchDo(ctx, client, sDeleteReq, "delete Snapshots", t)

		sCreateReq := &opensearchapi.SnapshotCreateRequest{
			Body:              bytes.NewBufferString(`{"indices":"test","ignore_unavailable":true,"include_global_state":false,"partial":false}`),
			Snapshot:          "test",
			Repository:        "snapshot-test",
			WaitForCompletion: opensearchapi.BoolPtr(true),
		}
		opensearchDo(ctx, client, sCreateReq, "create Snapshot", t)

		sCloneReq := &opensearchapi.SnapshotCloneRequest{
			Body:           bytes.NewBufferString(`{"indices":"*"}`),
			Snapshot:       "test",
			TargetSnapshot: "clone-test",
			Repository:     "snapshot-test",
		}
		opensearchDo(ctx, client, sCloneReq, "clone Snapshot", t)

		sGetReq := &opensearchapi.SnapshotGetRequest{
			Snapshot:   []string{"test", "clone-test"},
			Repository: "snapshot-test",
		}
		opensearchDo(ctx, client, sGetReq, "get Snapshots", t)

		sStatusReq := &opensearchapi.SnapshotStatusRequest{
			Snapshot:   []string{"test", "clone-test"},
			Repository: "snapshot-test",
		}
		opensearchDo(ctx, client, sStatusReq, "get Snapshot status", t)

		sRestoreReq := &opensearchapi.SnapshotRestoreRequest{
			Body: bytes.NewBufferString(
				`{
					"indices":"test",
					"ignore_unavailable":true,
					"include_global_state":false,
					"partial":false,
					"rename_pattern": "(.+)",
					"rename_replacement":"$1_restored"
				}`,
			),
			Snapshot:          "clone-test",
			Repository:        "snapshot-test",
			WaitForCompletion: opensearchapi.BoolPtr(true),
		}
		opensearchDo(ctx, client, sRestoreReq, "restore Snapshot", t)

		opensearchDo(ctx, client, sDeleteReq, "delete Snapshots", t)
		opensearchDo(ctx, client, iDeleteReq, "delete indices", t)
	})
	t.Run("Point_in_Time", func(t *testing.T) {
		var (
			err          error
			major, minor int64
			data         opensearchapi.InfoResp
		)
		index := "test"

		// Create Client
		//
		client, err := opensearch.NewDefaultClient()
		if err != nil {
			t.Fatalf("Error creating the client: %s\n", err)
		}

		// Skip test if Cluster version is below 2.4.0
		infoResp, err := client.Info()
		if err != nil {
			t.Fatalf("Error getting the cluster info: %s\n", err)
		}
		if err = json.NewDecoder(infoResp.Body).Decode(&data); err != nil {
			t.Fatalf("Error parsing the cluster info: %s\n", err)
		}
		major, minor, _, err = opensearch.ParseVersion(data.Version.Number)
		if err != nil {
			t.Fatalf("Error parsing the cluster version: %s\n", err)
		}
		if major < 2 || (major == 2 && minor < 4) {
			t.Skipf("Point in Time API requires OpenSearch 2.4.0 or later, got %s", data.Version.Number)
		}

		// Cleanup all existing Pits
		//
		resp, _, err := client.PointInTime.Delete(client.PointInTime.Delete.WithPitID("_all"))
		if err != nil {
			if resp != nil && resp.StatusCode != 404 {
				t.Fatalf("Failed to Delete all Pits: %s", err)
			}
		}

		// Index data
		//
		err = createTestIndex(client, index)
		if err != nil {
			t.Fatalf("Failed to index data: %s", err)
		}

		// Create a Pit
		//
		keepAlive, _ := time.ParseDuration("5m")
		_, pitCreateResp, err := client.PointInTime.Create(client.PointInTime.Create.WithKeepAlive(keepAlive), client.PointInTime.Create.WithIndex(index))
		if err != nil {
			t.Fatalf("Failed to create Pit: %s", err)
		}

		// Get all Pits
		//
		_, pitGetResp, err := client.PointInTime.Get()
		if err != nil {
			t.Fatalf("Failed to get Pits: %s", err)
		}

		// Delete the created Pit
		//
		_, pitDeleteResp, err := client.PointInTime.Delete(client.PointInTime.Delete.WithPitID(pitCreateResp.PitID))
		if err != nil {
			t.Fatalf("Failed to delete Pit: %s", err)
		}
		if (pitCreateResp.PitID != pitGetResp.Pits[0].PitID) || (pitCreateResp.PitID != pitDeleteResp.Pits[0].PitID) {
			t.Fatalf("The created Pit does not match the retrieved or deleted Pit")
		}
	})
}
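
// Running these tests: the "integration" build tag above keeps this file out of a plain
// `go test` run, so it is compiled only when the tag is supplied, for example:
//
//	go test -tags=integration -run TestAPI -v ./opensearchapi/
//
// NewDefaultClient targets a local cluster (the OPENSEARCH_URL environment variable can
// point it elsewhere), and the Snapshot subtest assumes the cluster exposes a filesystem
// snapshot location ("path.repo") at /usr/share/opensearch/mnt, matching the repository
// settings used above.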