github.com/NVIDIA/aistore@v1.3.23-0.20240517131212-7df6609be51d/ais/test/bucket_test.go

// Package integration_test.
/*
 * Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved.
 */
package integration_test

import (
	"context"
	"fmt"
	"math/rand"
	"net/http"
	"sort"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/NVIDIA/aistore/api"
	"github.com/NVIDIA/aistore/api/apc"
	"github.com/NVIDIA/aistore/cmn"
	"github.com/NVIDIA/aistore/cmn/cos"
	"github.com/NVIDIA/aistore/core/meta"
	"github.com/NVIDIA/aistore/tools"
	"github.com/NVIDIA/aistore/tools/docker"
	"github.com/NVIDIA/aistore/tools/readers"
	"github.com/NVIDIA/aistore/tools/tassert"
	"github.com/NVIDIA/aistore/tools/tlog"
	"github.com/NVIDIA/aistore/tools/trand"
	"github.com/NVIDIA/aistore/xact"
	"golang.org/x/sync/errgroup"
)

var (
	fltPresentEnum = []int{apc.FltExists, apc.FltExistsNoProps, apc.FltPresent, apc.FltExistsOutside}
	fltPresentText = map[int]string{
		apc.FltExists:        "flt-exists",
		apc.FltExistsNoProps: "flt-exists-no-props",
		apc.FltPresent:       "flt-present",
		apc.FltExistsOutside: "flt-exists-outside",
	}
)

func TestHTTPProviderBucket(t *testing.T) {
	var (
		bck = cmn.Bck{
			Name:     t.Name() + "Bucket",
			Provider: apc.HTTP,
		}
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)
	)

	err := api.CreateBucket(baseParams, bck, nil)
	tassert.Fatalf(t, err != nil, "expected error")

	_, err = api.GetObject(baseParams, bck, "nonexisting", nil)
	tassert.Fatalf(t, err != nil, "expected error")

	_, err = api.ListObjects(baseParams, bck, nil, api.ListArgs{})
	tassert.Fatalf(t, err != nil, "expected error")

	reader, _ := readers.NewRand(cos.KiB, cos.ChecksumNone)
	_, err = api.PutObject(&api.PutArgs{
		BaseParams: baseParams,
		Bck:        bck,
		ObjName:    "something",
		Reader:     reader,
	})
	tassert.Fatalf(t, err != nil, "expected error")
}

func TestListBuckets(t *testing.T) {
	var (
		bck        = cmn.Bck{Name: t.Name() + "Bucket", Provider: apc.AIS}
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)
		pnums      = make(map[string]cmn.Bcks)
	)
	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)

	bcks, err := api.ListBuckets(baseParams, cmn.QueryBcks{}, apc.FltExists)
	tassert.CheckFatal(t, err)

	for provider := range apc.Providers {
		qbck := cmn.QueryBcks{Provider: provider}
		bcks := bcks.Select(qbck)
		tlog.Logf("%s:\t%2d bucket%s\n", apc.ToScheme(provider), len(bcks), cos.Plural(len(bcks)))
		pnums[provider] = bcks
	}
	config := tools.GetClusterConfig(t)
	// tests: vs configured backend vs count
	for provider := range apc.Providers {
		_, configured := config.Backend.Providers[provider]
		qbck := cmn.QueryBcks{Provider: provider}
		bcks, err := api.ListBuckets(baseParams, qbck, apc.FltExists)
		if err != nil {
			if !configured {
				continue
			}
			tassert.CheckError(t, err)
		} else if apc.IsCloudProvider(provider) && !configured {
			t.Fatalf("%s is not configured: expecting list-buckets to fail, got %v\n", provider, bcks)
		}
		if num := len(bcks.Select(qbck)); len(bcks) != num {
			t.Fatalf("%s: num buckets(1): %d != %d\n", provider, len(bcks), num)
		}
		if len(bcks) != len(pnums[provider]) {
			t.Fatalf("%s: num buckets(2): %d != %d\n", provider, len(bcks), len(pnums[provider]))
		}
	}

	// tests: vs present vs exist-outside, etc.
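	// For each configured backend: make sure at least one remote bucket is present,
	// evict it, and cross-check the counts reported under FltPresent, FltExistsOutside,
	// and FltExistsNoProps; finally, HEAD the bucket to bring it back "present".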
	for provider := range apc.Providers {
		if _, configured := config.Backend.Providers[provider]; !configured {
			continue
		}
		qbck := cmn.QueryBcks{Provider: provider}
		presbcks, err := api.ListBuckets(baseParams, qbck, apc.FltPresent)
		tassert.CheckFatal(t, err)
		if qbck.Provider == apc.AIS {
			tassert.Fatalf(t, len(presbcks) > 0, "at least one ais bucket must be present")
			continue
		}
		// making it present if need be
		if len(presbcks) == 0 {
			if len(pnums[provider]) == 0 {
				continue
			}
			bcks, i := pnums[provider], 0
			if len(bcks) > 1 {
				i = rand.Intn(len(bcks))
			}
			pbck := bcks[i]
			_, err := api.HeadBucket(baseParams, pbck, false /* don't add */)
			tassert.CheckFatal(t, err)

			presbcks, err = api.ListBuckets(baseParams, qbck, apc.FltPresent)
			tassert.CheckFatal(t, err)

			tlog.Logf("%s: now present %s\n", provider, pbck)
			t.Cleanup(func() {
				err = api.EvictRemoteBucket(baseParams, pbck, false /*keep md*/)
				tassert.CheckFatal(t, err)
				tlog.Logf("[cleanup] %s evicted\n", pbck)
			})
		}

		b := presbcks[0]
		err = api.EvictRemoteBucket(baseParams, b, false /*keep md*/)
		tassert.CheckFatal(t, err)

		evbcks, err := api.ListBuckets(baseParams, qbck, apc.FltPresent)
		tassert.CheckFatal(t, err)
		tassert.Fatalf(t, len(presbcks) == len(evbcks)+1, "%s: expected one bucket less present after evicting %s (%d, %d)",
			provider, b, len(presbcks), len(evbcks))

		outbcks, err := api.ListBuckets(baseParams, qbck, apc.FltExistsOutside)
		tassert.CheckFatal(t, err)
		tassert.Fatalf(t, len(outbcks) > 0, "%s: expected at least one (evicted) bucket to \"exist outside\"", provider)

		allbcks, err := api.ListBuckets(baseParams, qbck, apc.FltExistsNoProps)
		tassert.CheckFatal(t, err)
		tassert.Fatalf(t, len(allbcks) == len(outbcks)+len(presbcks)-1,
			"%s: expected present + outside == all (%d, %d, %d)", provider, len(presbcks)-1, len(outbcks), len(allbcks))

		_, err = api.HeadBucket(baseParams, b, false /* don't add */)
		tassert.CheckFatal(t, err)
		presbcks2, err := api.ListBuckets(baseParams, qbck, apc.FltPresentNoProps)
		tassert.CheckFatal(t, err)
		tassert.Fatalf(t, len(presbcks2) == len(presbcks), "%s: expected num present back to original (%d, %d)",
			provider, len(presbcks2), len(presbcks))
	}

	// tests: NsGlobal
	qbck := cmn.QueryBcks{Provider: apc.AIS, Ns: cmn.NsGlobal}
	aisBuckets, err := api.ListBuckets(baseParams, qbck, apc.FltExists)
	tassert.CheckError(t, err)
	if len(aisBuckets) != len(bcks.Select(qbck)) {
		t.Fatalf("ais buckets: %d != %d\n", len(aisBuckets), len(bcks.Select(qbck)))
	}

	// tests: NsAnyRemote
	qbck = cmn.QueryBcks{Ns: cmn.NsAnyRemote}
	bcks, err = api.ListBuckets(baseParams, qbck, apc.FltExists)
	tassert.CheckError(t, err)
	qbck = cmn.QueryBcks{Provider: apc.AIS, Ns: cmn.NsAnyRemote}
	aisBuckets, err = api.ListBuckets(baseParams, qbck, apc.FltExists)
	tassert.CheckError(t, err)
	if len(aisBuckets) != len(bcks.Select(qbck)) {
		t.Fatalf("ais buckets: %d != %d\n", len(aisBuckets), len(bcks.Select(qbck)))
	}
}

// NOTE: for remote bucket, enter the bucket name directly (as TestMain makes it "present" at init time)
func TestGetBucketInfo(t *testing.T) {
	var (
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)
		bck        = cliBck
		isPresent  bool
	)
	if bck.IsRemote() {
		_, _, _, err := api.GetBucketInfo(baseParams, bck, api.BinfoArgs{FltPresence: apc.FltPresent})
		isPresent = err == nil
	}
	for _, fltPresence := range fltPresentEnum {
		text := fltPresentText[fltPresence]
		tlog.Logf("%q %s\n", text, strings.Repeat("-", 60-len(text)))
		args := api.BinfoArgs{
			UUID:        "",
			FltPresence: fltPresence,
		}

		if apc.IsFltNoProps(fltPresence) {
			// (fast path)
		} else {
			args.Summarize = true
			args.WithRemote = bck.IsRemote()
		}

		xid, props, info, err := api.GetBucketInfo(baseParams, bck, args)
		if err != nil {
			if herr := cmn.Str2HTTPErr(err.Error()); herr != nil {
				tlog.Logln(herr.TypeCode + ": " + herr.Message)
			} else {
				tlog.Logln(err.Error())
			}
		} else {
			ps := "bucket-props = nil"
			if props != nil {
				ps = fmt.Sprintf("bucket-props(mirror) %+v", props.Mirror)
			}
			tlog.Logf("%s: %s\n", bck.Cname(""), ps)

			is := "bucket-summary = nil"
			if info != nil {
				is = fmt.Sprintf("bucket-summary %+v", info.BsummResult)
			}
			tlog.Logf("x-%s[%s] %s: %s\n", apc.ActSummaryBck, xid, bck.Cname(""), is)
		}
		if bck.IsRemote() && !isPresent {
			// undo the side effect of calling api.GetBucketInfo
			_ = api.EvictRemoteBucket(baseParams, bck, false)
		}
		tlog.Logln("")
	}
	if bck.IsRemote() {
		_, _, _, err := api.GetBucketInfo(baseParams, bck, api.BinfoArgs{FltPresence: apc.FltPresent})
		isPresentEnd := err == nil
		tassert.Errorf(t, isPresent == isPresentEnd, "presence in the beginning (%t) != (%t) at the end",
			isPresent, isPresentEnd)
	}
}

func TestDefaultBucketProps(t *testing.T) {
	const dataSlices = 7
	var (
		proxyURL     = tools.RandomProxyURL(t)
		baseParams   = tools.BaseAPIParams(proxyURL)
		globalConfig = tools.GetClusterConfig(t)
		bck          = cmn.Bck{Name: testBucketName, Provider: apc.AIS}
	)
	tools.SetClusterConfig(t, cos.StrKVs{
		"ec.enabled":     "true",
		"ec.data_slices": strconv.FormatUint(dataSlices, 10),
	})
	defer tools.SetClusterConfig(t, cos.StrKVs{
		"ec.enabled":       "false",
		"ec.data_slices":   strconv.Itoa(globalConfig.EC.DataSlices),
		"ec.parity_slices": strconv.Itoa(globalConfig.EC.ParitySlices),
	})

	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)

	p, err := api.HeadBucket(baseParams, bck, true /* don't add */)
	tassert.CheckFatal(t, err)
	if !p.EC.Enabled {
		t.Error("EC should be enabled for ais buckets")
	}
	if p.EC.DataSlices != dataSlices {
		t.Errorf("Invalid number of EC data slices: expected %d, got %d", dataSlices, p.EC.DataSlices)
	}
}

func TestCreateWithBucketProps(t *testing.T) {
	var (
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)
		bck        = cmn.Bck{Name: testBucketName, Provider: apc.AIS}
	)
	propsToSet := &cmn.BpropsToSet{
		Cksum: &cmn.CksumConfToSet{
			Type:            apc.Ptr(cos.ChecksumMD5),
			ValidateWarmGet: apc.Ptr(true),
			EnableReadRange: apc.Ptr(true),
			ValidateColdGet: apc.Ptr(false),
			ValidateObjMove: apc.Ptr(true),
		},
		WritePolicy: &cmn.WritePolicyConfToSet{
			Data: apc.Ptr(apc.WriteImmediate),
			MD:   apc.Ptr(apc.WriteNever),
		},
	}
	tools.CreateBucket(t, proxyURL, bck, propsToSet, true /*cleanup*/)

	p, err := api.HeadBucket(baseParams, bck, true /* don't add */)
	tassert.CheckFatal(t, err)
	validateBucketProps(t, propsToSet, p)
}

func TestCreateRemoteBucket(t *testing.T) {
	var (
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)
		bck        = cliBck
	)
	tools.CheckSkip(t, &tools.SkipTestArgs{RemoteBck: true, Bck: bck})
	exists, _ := tools.BucketExists(nil, tools.GetPrimaryURL(), bck)
	tests := []struct {
		bck    cmn.Bck
		props  *cmn.BpropsToSet
		exists bool
	}{
		{bck: bck, exists: exists},
		{bck: cmn.Bck{Provider: cliBck.Provider, Name: trand.String(10)}},
	}
	for _, test := range tests {
		err := api.CreateBucket(baseParams, test.bck, test.props)
		if err == nil {
			continue
		}
		herr := cmn.Err2HTTPErr(err)
		tassert.Fatalf(t, herr != nil, "expected ErrHTTP, got %v (bucket %q)", err, test.bck)
		if test.exists {
			tassert.Fatalf(t, strings.Contains(herr.Message, "already exists"),
				"expecting \"already exists\", got %+v", herr)
		} else {
			tassert.Fatalf(t, herr.Status == http.StatusNotImplemented || strings.Contains(herr.Message, "support"),
				"expecting 501 status or unsupported, got %+v", herr)
		}
	}
}

func TestCreateDestroyRemoteAISBucket(t *testing.T) {
	t.Run("withObjects", func(t *testing.T) { testCreateDestroyRemoteAISBucket(t, true) })
	t.Run("withoutObjects", func(t *testing.T) { testCreateDestroyRemoteAISBucket(t, false) })
}

func testCreateDestroyRemoteAISBucket(t *testing.T, withObjects bool) {
	tools.CheckSkip(t, &tools.SkipTestArgs{RequiresRemoteCluster: true})
	bck := cmn.Bck{
		Name:     trand.String(10),
		Provider: apc.AIS,
		Ns: cmn.Ns{
			UUID: tools.RemoteCluster.UUID,
		},
	}
	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)
	_, err := api.HeadBucket(baseParams, bck, true /* don't add */)
	tassert.CheckFatal(t, err)
	if withObjects {
		m := ioContext{
			t:         t,
			num:       1000,
			fileSize:  cos.KiB,
			fixedSize: true,
			bck:       bck,
		}
		m.init(true /*cleanup*/)
		m.puts()
	}

	err = api.DestroyBucket(baseParams, bck)
	tassert.CheckFatal(t, err)

	tlog.Logf("%s destroyed\n", bck.Cname(""))
	bcks, err := api.ListBuckets(baseParams, cmn.QueryBcks(bck), apc.FltExists)
	tassert.CheckFatal(t, err)
	tassert.Fatalf(t, !tools.BucketsContain(bcks, cmn.QueryBcks(bck)), "expected bucket to not be listed")
}

func TestOverwriteLomCache(t *testing.T) {
	for _, mdwrite := range []apc.WritePolicy{apc.WriteImmediate, apc.WriteNever} {
		name := string(mdwrite)
		if name == "" {
			name = "write-immediate"
		} else {
			name = "write-" + name
		}
		t.Run(name, func(t *testing.T) {
			overwriteLomCache(mdwrite, t)
		})
	}
}

func overwriteLomCache(mdwrite apc.WritePolicy, t *testing.T) {
	var (
		m = ioContext{
			t:         t,
			num:       234,
			fileSize:  73,
			fixedSize: true,
			prefix:    trand.String(6) + "-",
		}
		baseParams = tools.BaseAPIParams()
	)
	if testing.Short() {
		m.num = 50
	}
	m.init(true /*cleanup*/)
	m.smap = tools.GetClusterMap(m.t, m.proxyURL)

	for _, target := range m.smap.Tmap.ActiveNodes() {
		mpList, err := api.GetMountpaths(baseParams, target)
		tassert.CheckFatal(t, err)
		l := len(mpList.Available)
		tassert.Fatalf(t, l >= 2, "%s has %d mountpaths, need at least 2", target, l)
	}
	tlog.Logf("Create %s(mirrored, write-policy-md=%s)\n", m.bck, mdwrite)
	propsToSet := &cmn.BpropsToSet{
		Mirror: &cmn.MirrorConfToSet{Enabled: apc.Ptr(true)},
		WritePolicy: &cmn.WritePolicyConfToSet{
			Data: apc.Ptr(apc.WriteImmediate),
			MD:   apc.Ptr(mdwrite),
		},
	}
	tools.CreateBucket(t, m.proxyURL, m.bck, propsToSet, true /*cleanup*/)

	m.puts()

	// NOTE: not waiting here for apc.ActPutCopies

	tlog.Logf("List %s\n", m.bck)
	msg := &apc.LsoMsg{Props: apc.GetPropsName}
	objList, err := api.ListObjects(baseParams, m.bck, msg, api.ListArgs{})
	tassert.CheckFatal(t, err)
	tassert.Fatalf(t, len(objList.Entries) == m.num, "expecting %d entries, have %d",
		m.num, len(objList.Entries))

	tlog.Logf("Overwrite %s objects with newer versions\n", m.bck)
	nsize := int64(m.fileSize) * 10
	for _, en := range objList.Entries {
		reader, err := readers.NewRand(nsize, cos.ChecksumNone)
		tassert.CheckFatal(t, err)
		_, err = api.PutObject(&api.PutArgs{
			BaseParams: baseParams,
			Bck:        m.bck,
			ObjName:    en.Name,
			Reader:     reader,
		})
		tassert.CheckFatal(t, err)
	}
	// wait for pending writes (of the copies)
	args := xact.ArgsMsg{Kind: apc.ActPutCopies, Bck: m.bck}
	api.WaitForXactionIdle(baseParams, &args)

	tlog.Logf("List %s new versions\n", m.bck)
	msg = &apc.LsoMsg{}
	msg.AddProps(apc.GetPropsAll...)
	objList, err = api.ListObjects(baseParams, m.bck, msg, api.ListArgs{})
	tassert.CheckFatal(t, err)
	tassert.Fatalf(t, len(objList.Entries) == m.num, "expecting %d entries, have %d",
		m.num, len(objList.Entries))

	for _, en := range objList.Entries {
		n, s, c := en.Name, en.Size, en.Copies
		tassert.Fatalf(t, s == nsize, "%s: expecting size = %d, got %d", n, nsize, s)
		tassert.Fatalf(t, c == 2, "%s: expecting copies = %d, got %d", n, 2, c)
	}
}

func TestStressCreateDestroyBucket(t *testing.T) {
	tools.CheckSkip(t, &tools.SkipTestArgs{Long: true})

	const (
		bckCount  = 10
		iterCount = 20
	)

	var (
		baseParams = tools.BaseAPIParams()
		group, _   = errgroup.WithContext(context.Background())
	)

	for range bckCount {
		group.Go(func() error {
			m := &ioContext{
				t:      t,
				num:    100,
				silent: true,
			}

			m.init(true /*cleanup*/)

			for range iterCount {
				if err := api.CreateBucket(baseParams, m.bck, nil); err != nil {
					return err
				}
				if rand.Intn(iterCount) == 0 { // just test couple times, no need to flood
					if err := api.CreateBucket(baseParams, m.bck, nil); err == nil {
						return fmt.Errorf("expected error to occur on bucket %q - create second time", m.bck)
					}
				}
				m.puts()
				if _, err := api.ListObjects(baseParams, m.bck, nil, api.ListArgs{}); err != nil {
					return err
				}
				m.gets(nil, false)
				if err := api.DestroyBucket(baseParams, m.bck); err != nil {
					return err
				}
				if rand.Intn(iterCount) == 0 { // just test couple times, no need to flood
					if err := api.DestroyBucket(baseParams, m.bck); err == nil {
						return fmt.Errorf("expected error to occur on bucket %q - destroy second time", m.bck)
					}
				}
			}
			return nil
		})
	}
	err := group.Wait()
	tassert.CheckFatal(t, err)
}

func TestResetBucketProps(t *testing.T) {
	var (
		proxyURL     = tools.RandomProxyURL(t)
		globalConfig = tools.GetClusterConfig(t)
		baseParams   = tools.BaseAPIParams(proxyURL)
		bck          = cmn.Bck{
			Name:     testBucketName,
			Provider: apc.AIS,
		}
		propsToSet = &cmn.BpropsToSet{
			Cksum: &cmn.CksumConfToSet{
				Type:            apc.Ptr(cos.ChecksumNone),
				ValidateWarmGet: apc.Ptr(true),
				EnableReadRange: apc.Ptr(true),
			},
			EC: &cmn.ECConfToSet{
				Enabled:    apc.Ptr(false),
				DataSlices: apc.Ptr(1),
				ParitySlices: apc.Ptr(2),
			},
		}
	)
	tools.CheckSkip(t, &tools.SkipTestArgs{
		MinTargets: *propsToSet.EC.DataSlices + *propsToSet.EC.ParitySlices,
	})

	tools.SetClusterConfig(t, cos.StrKVs{"ec.enabled": "true"})
	defer tools.SetClusterConfig(t, cos.StrKVs{
		"ec.enabled":       "false",
		"ec.data_slices":   strconv.Itoa(globalConfig.EC.DataSlices),
		"ec.parity_slices": strconv.Itoa(globalConfig.EC.ParitySlices),
	})

	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)

	defaultProps, err := api.HeadBucket(baseParams, bck, true /* don't add */)
	tassert.CheckFatal(t, err)

	_, err = api.SetBucketProps(baseParams, bck, propsToSet)
	tassert.CheckFatal(t, err)

	p, err := api.HeadBucket(baseParams, bck, true /* don't add */)
	tassert.CheckFatal(t, err)

	// check that bucket props do get set
	validateBucketProps(t, propsToSet, p)
	_, err = api.ResetBucketProps(baseParams, bck)
	tassert.CheckFatal(t, err)

	p, err = api.HeadBucket(baseParams, bck, true /* don't add */)
	tassert.CheckFatal(t, err)

	if !p.Equal(defaultProps) {
		t.Errorf("props have not been reset properly: expected: %+v, got: %+v", defaultProps, p)
	}
}

func TestSetInvalidBucketProps(t *testing.T) {
	var (
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)
		bck        = cmn.Bck{
			Name:     testBucketName,
			Provider: apc.AIS,
		}

		tests = []struct {
			name  string
			props *cmn.BpropsToSet
		}{
			{
				name: "humongous number of copies",
				props: &cmn.BpropsToSet{
					Mirror: &cmn.MirrorConfToSet{
						Enabled: apc.Ptr(true),
						Copies:  apc.Ptr[int64](120),
					},
				},
			},
			{
				name: "too many copies",
				props: &cmn.BpropsToSet{
					Mirror: &cmn.MirrorConfToSet{
						Enabled: apc.Ptr(true),
						Copies:  apc.Ptr[int64](12),
					},
				},
			},
			{
				name: "humongous number of slices",
				props: &cmn.BpropsToSet{
					EC: &cmn.ECConfToSet{
						Enabled:      apc.Ptr(true),
						ParitySlices: apc.Ptr(120),
					},
				},
			},
			{
				name: "too many slices",
				props: &cmn.BpropsToSet{
					EC: &cmn.ECConfToSet{
						Enabled:      apc.Ptr(true),
						ParitySlices: apc.Ptr(12),
					},
				},
			},
			{
				name: "enable both ec and mirroring",
				props: &cmn.BpropsToSet{
					EC:     &cmn.ECConfToSet{Enabled: apc.Ptr(true)},
					Mirror: &cmn.MirrorConfToSet{Enabled: apc.Ptr(true)},
				},
			},
		}
	)

	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			_, err := api.SetBucketProps(baseParams, bck, test.props)
			if err == nil {
				t.Error("expected error when setting bad input")
			}
		})
	}
}

func TestListObjectsRemoteBucketVersions(t *testing.T) {
	var (
		m = ioContext{
			t:        t,
			bck:      cliBck,
			num:      50,
			fileSize: 128,
			prefix:   trand.String(6) + "-",
		}
		baseParams = tools.BaseAPIParams()
	)

	tools.CheckSkip(t, &tools.SkipTestArgs{Long: true, RemoteBck: true, Bck: m.bck})

	m.init(true /*cleanup*/)

	p, err := api.HeadBucket(baseParams, m.bck, false /* don't add */)
	tassert.CheckFatal(t, err)

	if !p.Versioning.Enabled {
		t.Skipf("%s requires a remote bucket with enabled versioning", t.Name())
	}

	m.puts()

	tlog.Logf("Listing %q objects\n", m.bck)
	msg := &apc.LsoMsg{Prefix: m.prefix}
	msg.AddProps(apc.GetPropsVersion, apc.GetPropsSize)
	bckObjs, err := api.ListObjects(baseParams, m.bck, msg, api.ListArgs{})
	tassert.CheckFatal(t, err)

	tlog.Logf("Checking %q object versions [total: %d]\n", m.bck, len(bckObjs.Entries))
	for _, en := range bckObjs.Entries {
		tassert.Errorf(t, en.Size != 0, "object %s does not have size", en.Name)
		tassert.Errorf(t, en.Version != "", "object %s does not have version", en.Name)
	}
}

// Minimalistic list objects test to check that everything works correctly.
func TestListObjectsSmoke(t *testing.T) {
	runProviderTests(t, func(t *testing.T, bck *meta.Bck) {
		var (
			baseParams = tools.BaseAPIParams()
			m          = ioContext{
				t:                   t,
				num:                 100,
				bck:                 bck.Clone(),
				deleteRemoteBckObjs: true,
				fileSize:            5 * cos.KiB,
			}

			iters = 5
			msg   = &apc.LsoMsg{PageSize: 10}
		)

		m.init(true /*cleanup*/)
		m.puts()

		// Run couple iterations to see that we get deterministic results.
		tlog.Logf("run %d list objects iterations\n", iters)
		for iter := range iters {
			objList, err := api.ListObjects(baseParams, m.bck, msg, api.ListArgs{})
			tassert.CheckFatal(t, err)
			tassert.Errorf(
				t, len(objList.Entries) == m.num,
				"unexpected number of entries (got: %d, expected: %d) on iter: %d",
				len(objList.Entries), m.num, iter,
			)
		}
	})
}

func TestListObjectsGoBack(t *testing.T) {
	runProviderTests(t, func(t *testing.T, bck *meta.Bck) {
		var (
			baseParams = tools.BaseAPIParams()
			m          = ioContext{
				t:        t,
				num:      2000,
				bck:      bck.Clone(),
				fileSize: 128,
			}

			msg = &apc.LsoMsg{PageSize: 50}
		)

		if !bck.IsAIS() {
			m.num = 300
		}

		m.init(true /*cleanup*/)
		m.puts()
		if m.bck.IsRemote() {
			defer m.del()
		}
		var (
			tokens          []string
			entries         cmn.LsoEntries
			expectedEntries cmn.LsoEntries
		)
		tlog.Logln("listing couple pages to move iterator on targets")
		for range m.num / int(msg.PageSize) {
			tokens = append(tokens, msg.ContinuationToken)
			objPage, err := api.ListObjectsPage(baseParams, m.bck, msg, api.ListArgs{})
			tassert.CheckFatal(t, err)
			expectedEntries = append(expectedEntries, objPage.Entries...)
		}

		tlog.Logln("list bucket's content in reverse order")

		for i := len(tokens) - 1; i >= 0; i-- {
			msg.ContinuationToken = tokens[i]
			objPage, err := api.ListObjectsPage(baseParams, m.bck, msg, api.ListArgs{})
			tassert.CheckFatal(t, err)
			entries = append(entries, objPage.Entries...)
		}

		cmn.SortLso(entries)
		cmn.SortLso(expectedEntries)

		tassert.Fatalf(
			t, len(expectedEntries) == m.num,
			"unexpected number of expected entries (got: %d, expected: %d)",
			len(expectedEntries), m.num,
		)

		tassert.Fatalf(
			t, len(entries) == len(expectedEntries),
			"unexpected number of entries (got: %d, expected: %d)",
			len(entries), len(expectedEntries),
		)

		for idx := range expectedEntries {
			tassert.Errorf(
				t, entries[idx].Name == expectedEntries[idx].Name,
				"unexpected en (got: %q, expected: %q)",
				entries[idx], expectedEntries[idx],
			)
		}
	})
}

func TestListObjectsRerequestPage(t *testing.T) {
	runProviderTests(t, func(t *testing.T, bck *meta.Bck) {
		var (
			baseParams = tools.BaseAPIParams()
			m          = ioContext{
				t:                   t,
				bck:                 bck.Clone(),
				deleteRemoteBckObjs: true,
				num:                 500,
				fileSize:            128,
			}
			rerequests = 5
		)

		if !bck.IsAIS() {
			m.num = 50
		}

		m.init(true /*cleanup*/)
		m.puts()
		if m.bck.IsRemote() {
			defer m.del()
		}
		var (
			err     error
			objList *cmn.LsoRes

			totalCnt = 0
			msg      = &apc.LsoMsg{PageSize: 10}
		)
		tlog.Logln("starting rerequesting routine...")
		for {
			prevToken := msg.ContinuationToken
			for range rerequests {
				msg.ContinuationToken = prevToken
				objList, err = api.ListObjectsPage(baseParams, m.bck, msg, api.ListArgs{})
				tassert.CheckFatal(t, err)
			}
			totalCnt += len(objList.Entries)
			if objList.ContinuationToken == "" {
				break
			}
		}
		tassert.Fatalf(
			t, totalCnt == m.num,
			"unexpected total number of objects (got: %d, expected: %d)", totalCnt, m.num,
		)
	})
}

func TestListObjectsStartAfter(t *testing.T) {
	runProviderTests(t, func(t *testing.T, bck *meta.Bck) {
		var (
			baseParams = tools.BaseAPIParams()
			m          = ioContext{
				t:        t,
				num:      200,
				bck:      bck.Clone(),
				fileSize: 128,
			}
		)

		if !bck.IsAIS() {
			m.num = 20
		}

		m.init(true /*cleanup*/)
		m.puts()
		if m.bck.IsRemote() {
			defer m.del()
		}
		objList, err := api.ListObjects(baseParams, m.bck, nil, api.ListArgs{})
		tassert.CheckFatal(t, err)

		middleObjName := objList.Entries[m.num/2-1].Name
		tlog.Logf("start listing bucket after: %q...\n", middleObjName)

		msg := &apc.LsoMsg{PageSize: 10, StartAfter: middleObjName}
		objList, err = api.ListObjects(baseParams, m.bck, msg, api.ListArgs{})
		if bck.IsAIS() {
			tassert.CheckFatal(t, err)
			tassert.Errorf(
				t, len(objList.Entries) == m.num/2,
				"unexpected number of entries (got: %d, expected: %d)",
				len(objList.Entries), m.num/2,
			)
		} else if err != nil {
			herr := cmn.Err2HTTPErr(err)
			tlog.Logf("Error is expected here, got %q\n", herr)
		} else {
			tassert.Errorf(t, false, "expected an error, got nil")
		}
	})
}

func TestListObjectsProps(t *testing.T) {
	runProviderTests(t, func(t *testing.T, bck *meta.Bck) {
		var (
			baseParams = tools.BaseAPIParams()
			m          = ioContext{
				t:                   t,
				num:                 rand.Intn(5000) + 1000,
				bck:                 bck.Clone(),
				fileSize:            128,
				deleteRemoteBckObjs: true,
			}
			remoteVersioning bool
		)

		if !bck.IsAIS() {
			m.num = rand.Intn(250) + 100
		}

		m.init(true /*cleanup*/)
		m.puts()
		if m.bck.IsRemote() {
			defer m.del()

			s := "disabled"
			p, err := api.HeadBucket(baseParams, m.bck, false /* don't add */)
			tassert.CheckFatal(t, err)
			if remoteVersioning = p.Versioning.Enabled; remoteVersioning {
				s = "enabled"
			}
			tlog.Logf("%s: versioning is %s\n", m.bck.Cname(""), s)
		}
		checkProps := func(useCache bool, props []string, f func(en *cmn.LsoEnt)) {
			msg := &apc.LsoMsg{PageSize: 100}
			if useCache {
				msg.SetFlag(apc.UseListObjsCache)
			}
			msg.AddProps(props...)
			objList, err := api.ListObjects(baseParams, m.bck, msg, api.ListArgs{})
			tassert.CheckFatal(t, err)
			tassert.Errorf(
				t, len(objList.Entries) == m.num,
				"unexpected number of entries (got: %d, expected: %d)", len(objList.Entries), m.num,
			)
			for _, en := range objList.Entries {
				tassert.Errorf(t, en.Name != "", "name is not set")
				f(en)
			}
		}

		for _, useCache := range []bool{false, true} {
			tlog.Logf("[cache=%t] trying empty (minimal) subset of props...\n", useCache)
			checkProps(useCache, []string{}, func(en *cmn.LsoEnt) {
				tassert.Errorf(t, en.Name != "", "name is not set")
				tassert.Errorf(t, en.Size != 0, "size is not set")

				tassert.Errorf(t, en.Atime == "", "atime is set")
				tassert.Errorf(t, en.Location == "", "target location is set %q", en.Location)
				tassert.Errorf(t, en.Copies == 0, "copies is set")
			})

			tlog.Logf("[cache=%t] trying ais-default subset of props...\n", useCache)
			checkProps(useCache, apc.GetPropsDefaultAIS, func(en *cmn.LsoEnt) {
				tassert.Errorf(t, en.Size != 0, "size is not set")
				tassert.Errorf(t, en.Checksum != "", "checksum is not set")
				tassert.Errorf(t, en.Atime != "", "atime is not set")

				tassert.Errorf(t, en.Location == "", "target location is set %q", en.Location)
				tassert.Errorf(t, en.Copies == 0, "copies is set")
			})

			tlog.Logf("[cache=%t] trying cloud-default subset of props...\n", useCache)
			checkProps(useCache, apc.GetPropsDefaultCloud, func(en *cmn.LsoEnt) {
				tassert.Errorf(t, en.Size != 0, "size is not set")
				tassert.Errorf(t, en.Checksum != "", "checksum is not set")
				if bck.IsAIS() || remoteVersioning {
					tassert.Errorf(t, en.Version != "", "version is not set")
				}
				tassert.Errorf(t, !m.bck.IsCloud() || en.Custom != "", "custom is not set")

				tassert.Errorf(t, en.Atime == "", "atime is set")
				tassert.Errorf(t, en.Copies == 0, "copies is set")
			})

			tlog.Logf("[cache=%t] trying specific subset of props...\n", useCache)
			checkProps(useCache,
				[]string{apc.GetPropsChecksum, apc.GetPropsVersion, apc.GetPropsCopies}, func(en *cmn.LsoEnt) {
					tassert.Errorf(t, en.Checksum != "", "checksum is not set")
					if bck.IsAIS() || remoteVersioning {
						tassert.Errorf(t, en.Version != "", "version is not set: "+m.bck.Cname(en.Name))
					}
					tassert.Errorf(t, en.Copies > 0, "copies is not set")

					tassert.Errorf(t, en.Atime == "", "atime is set")
					tassert.Errorf(t, en.Location == "", "target location is set %q", en.Location)
				})

			tlog.Logf("[cache=%t] trying small subset of props...\n", useCache)
			checkProps(useCache, []string{apc.GetPropsSize}, func(en *cmn.LsoEnt) {
				tassert.Errorf(t, en.Size != 0, "size is not set")

				tassert.Errorf(t, en.Atime == "", "atime is set")
				tassert.Errorf(t, en.Location == "", "target location is set %q", en.Location)
				tassert.Errorf(t, en.Copies == 0, "copies is set")
			})

			tlog.Logf("[cache=%t] trying all props...\n", useCache)
			checkProps(useCache, apc.GetPropsAll, func(en *cmn.LsoEnt) {
				tassert.Errorf(t, en.Size != 0, "size is not set")
				if bck.IsAIS() || remoteVersioning {
					tassert.Errorf(t, en.Version != "", "version is not set: "+m.bck.Cname(en.Name))
				}
				tassert.Errorf(t, en.Checksum != "", "checksum is not set")
				tassert.Errorf(t, en.Atime != "", "atime is not set")
				tassert.Errorf(t, en.Location != "", "target location is not set [%#v]", en)
				tassert.Errorf(t, en.Copies != 0, "copies is not set")
			})
		}
	})
}

// Runs remote list objects with `cached == true` (for both evicted and not evicted objects).
func TestListObjectsRemoteCached(t *testing.T) {
	var (
		baseParams = tools.BaseAPIParams()
		m          = ioContext{
			t:        t,
			bck:      cliBck,
			num:      rand.Intn(100) + 10,
			fileSize: 128,
		}

		remoteVersioning bool
		s                = "disabled"
	)
	tools.CheckSkip(t, &tools.SkipTestArgs{RemoteBck: true, Bck: m.bck})

	p, err := api.HeadBucket(baseParams, m.bck, false /* don't add */)
	tassert.CheckFatal(t, err)
	if remoteVersioning = p.Versioning.Enabled; remoteVersioning {
		s = "enabled"
	}
	tlog.Logf("%s: versioning is %s\n", m.bck.Cname(""), s)

	m.init(true /*cleanup*/)

	for _, evict := range []bool{false, true} {
		tlog.Logf("list remote objects with evict=%t\n", evict)
		m.remotePuts(evict)

		msg := &apc.LsoMsg{PageSize: 10, Flags: apc.LsObjCached}
		msg.AddProps(apc.GetPropsDefaultAIS...)
		msg.AddProps(apc.GetPropsVersion)

		objList, err := api.ListObjects(baseParams, m.bck, msg, api.ListArgs{})
		tassert.CheckFatal(t, err)
		if evict {
			tassert.Errorf(
				t, len(objList.Entries) == 0,
				"unexpected number of entries (got: %d, expected: 0)", len(objList.Entries),
			)
		} else {
			tassert.Errorf(
				t, len(objList.Entries) == m.num,
				"unexpected number of entries (got: %d, expected: %d)", len(objList.Entries), m.num,
			)
			for _, en := range objList.Entries {
				tassert.Errorf(t, en.Name != "", "name is not set")
				tassert.Errorf(t, en.Size != 0, "size is not set")
				tassert.Errorf(t, en.Checksum != "", "checksum is not set")
				tassert.Errorf(t, en.Atime != "", "atime is not set")
				if remoteVersioning {
					tassert.Errorf(t, en.Version != "", "version is not set")
				}
				tassert.Errorf(t, en.Location == "", "target location is set %q", en.Location)
				tassert.Errorf(t, en.Copies == 0, "copies is set")
			}
		}
	}
}

// Runs standard list objects but selects new random proxy every page.
func TestListObjectsRandProxy(t *testing.T) {
	runProviderTests(t, func(t *testing.T, bck *meta.Bck) {
		var (
			m = ioContext{
				t:                   t,
				bck:                 bck.Clone(),
				num:                 rand.Intn(5000) + 1000,
				fileSize:            5 * cos.KiB,
				deleteRemoteBckObjs: true,
			}

			totalCnt = 0
			msg      = &apc.LsoMsg{PageSize: 100}
		)

		if !bck.IsAIS() {
			m.num = rand.Intn(300) + 100
		}

		m.init(true /*cleanup*/)
		m.puts()
		if m.bck.IsRemote() {
			defer m.del()
		}
		for {
			baseParams := tools.BaseAPIParams()
			objList, err := api.ListObjectsPage(baseParams, m.bck, msg, api.ListArgs{})
			tassert.CheckFatal(t, err)
			totalCnt += len(objList.Entries)
			if objList.ContinuationToken == "" {
				break
			}
		}
		tassert.Fatalf(
			t, totalCnt == m.num,
			"unexpected total number of objects (got: %d, expected: %d)", totalCnt, m.num,
		)
	})
}

// Runs standard list objects but changes the page size every request.
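// Note: the LsoMsg below sets apc.LsObjCached, i.e. only objects already present in the cluster are listed.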
func TestListObjectsRandPageSize(t *testing.T) {
	runProviderTests(t, func(t *testing.T, bck *meta.Bck) {
		var (
			totalCnt   int
			baseParams = tools.BaseAPIParams()
			m          = ioContext{
				t:        t,
				bck:      bck.Clone(),
				num:      rand.Intn(5000) + 1000,
				fileSize: 128,
			}
			msg = &apc.LsoMsg{Flags: apc.LsObjCached}
		)

		if !bck.IsAIS() {
			m.num = rand.Intn(200) + 100
		}

		m.init(true /*cleanup*/)
		m.puts()
		if m.bck.IsRemote() {
			defer m.del()
		}
		for {
			msg.PageSize = rand.Int63n(50) + 50

			objList, err := api.ListObjectsPage(baseParams, m.bck, msg, api.ListArgs{})
			tassert.CheckFatal(t, err)
			totalCnt += len(objList.Entries)
			if objList.ContinuationToken == "" {
				break
			}
			tassert.Errorf(t, len(objList.Entries) == int(msg.PageSize), "wrong page size %d (expected %d)",
				len(objList.Entries), msg.PageSize,
			)
		}
		tassert.Fatalf(
			t, totalCnt == m.num,
			"unexpected total number of objects (got: %d, expected: %d)", totalCnt, m.num,
		)
	})
}

func TestListObjects(t *testing.T) {
	type objEntry struct {
		name string
		size int64
	}

	var (
		iterations  = 10
		workerCount = 10
		dirLen      = 10

		bck = cmn.Bck{
			Name:     t.Name() + "Bucket",
			Provider: apc.AIS,
		}
		wg = &sync.WaitGroup{}

		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)
	)

	if testing.Short() {
		iterations = 3
	}

	tests := []struct {
		pageSize int64
	}{
		{pageSize: 0},
		{pageSize: 2000},
		{pageSize: rand.Int63n(15000)},
	}

	for _, test := range tests {
		var name string
		if test.pageSize == 0 {
			name = "pagesize:default"
		} else {
			name += "pagesize:" + strconv.FormatUint(uint64(test.pageSize), 10)
		}
		t.Run(name, func(t *testing.T) {
			var (
				objs     sync.Map
				prefixes sync.Map
			)

			tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)

			p := bck.DefaultProps(initialClusterConfig)

			totalObjects := 0
			for iter := 1; iter <= iterations; iter++ {
				tlog.Logf("listing iteration: %d/%d (total_objs: %d)\n", iter, iterations, totalObjects)
				objectCount := rand.Intn(800) + 1010
				totalObjects += objectCount
				for wid := range workerCount {
					wg.Add(1)
					go func(wid int) {
						defer wg.Done()
						objectSize := int64(rand.Intn(256) + 20)
						objDir := tools.RandomObjDir(dirLen, 5)
						objectsToPut := objectCount / workerCount
						if wid == workerCount-1 { // last worker puts leftovers
							objectsToPut += objectCount % workerCount
						}
						objNames := tools.PutRR(t, baseParams, objectSize, p.Cksum.Type, bck, objDir, objectsToPut)
						for _, objName := range objNames {
							objs.Store(objName, objEntry{
								name: objName,
								size: objectSize,
							})
						}

						if objDir != "" {
							prefixes.Store(objDir, objectsToPut)
						}
					}(wid)
				}
				wg.Wait()

				// Confirm PUTs by listing objects.
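				// The list-objects cache is invalidated first (ListObjectsInvalidateCache below),
				// so the count must reflect all PUTs so far rather than a previously cached page.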
				msg := &apc.LsoMsg{PageSize: test.pageSize}
				msg.AddProps(apc.GetPropsChecksum, apc.GetPropsAtime, apc.GetPropsVersion, apc.GetPropsCopies, apc.GetPropsSize)
				tassert.CheckError(t, api.ListObjectsInvalidateCache(baseParams, bck))
				lst, err := api.ListObjects(baseParams, bck, msg, api.ListArgs{})
				tassert.CheckFatal(t, err)

				if lst.ContinuationToken != "" {
					t.Errorf("continuation token was unexpectedly set to: %s", lst.ContinuationToken)
				}

				empty := &cmn.LsoEnt{}
				for _, en := range lst.Entries {
					e, exists := objs.Load(en.Name)
					if !exists {
						t.Errorf("failed to locate %s", bck.Cname(en.Name))
						continue
					}

					obj := e.(objEntry)
					if obj.size != en.Size {
						t.Errorf(
							"sizes do not match for object %s, expected: %d, got: %d",
							obj.name, obj.size, en.Size,
						)
					}

					if en.Version == empty.Version {
						t.Errorf("%s version is empty (not set)", bck.Cname(en.Name))
					} else if en.Checksum == empty.Checksum ||
						en.Atime == empty.Atime ||
						en.Flags == empty.Flags ||
						en.Copies == empty.Copies {
						t.Errorf("some fields of %s are empty (not set): %#v", bck.Cname(en.Name), en)
					}
				}

				// Check if names in the entries are unique.
				objs.Range(func(key, _ any) bool {
					objName := key.(string)
					i := sort.Search(len(lst.Entries), func(i int) bool {
						return lst.Entries[i].Name >= objName
					})
					if i == len(lst.Entries) || lst.Entries[i].Name != objName {
						t.Errorf("object %s was not found in the result of bucket listing", objName)
					}
					return true
				})

				if len(lst.Entries) != totalObjects {
					t.Fatalf("actual objects %d, expected: %d", len(lst.Entries), totalObjects)
				}

				// Check listing bucket with predefined prefix.
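				// Each stored prefix maps to the number of objects a worker PUT under it;
				// listing with that prefix must return exactly that many entries, all carrying the prefix.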
				prefixes.Range(func(key, value any) bool {
					prefix := key.(string)
					expectedObjCount := value.(int)

					msg := &apc.LsoMsg{
						Prefix: prefix,
					}
					lst, err = api.ListObjects(baseParams, bck, msg, api.ListArgs{})
					tassert.CheckFatal(t, err)

					if expectedObjCount != len(lst.Entries) {
						t.Errorf(
							"(prefix: %s), actual objects %d, expected: %d",
							prefix, len(lst.Entries), expectedObjCount,
						)
					}

					for _, en := range lst.Entries {
						if !strings.HasPrefix(en.Name, prefix) {
							t.Errorf("object %q does not have expected prefix: %q", en.Name, prefix)
						}
					}
					return true
				})
			}

			tassert.CheckError(t, api.ListObjectsInvalidateCache(baseParams, bck))
		})
	}
}

func TestListObjectsPrefix(t *testing.T) {
	var (
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)
	)

	providers := []string{apc.AIS}
	if cliBck.IsRemote() {
		providers = append(providers, cliBck.Provider)
	}

	for _, provider := range providers {
		t.Run(provider, func(t *testing.T) {
			const objCnt = 30
			var (
				customPage = true
				bck        cmn.Bck
			)
			bckTest := cmn.Bck{Provider: provider, Ns: cmn.NsGlobal}
			if bckTest.IsRemote() {
				bck = cliBck

				tools.CheckSkip(t, &tools.SkipTestArgs{RemoteBck: true, Bck: bck})

				bckProp, err := api.HeadBucket(baseParams, bck, false /* don't add */)
				tassert.CheckFatal(t, err)
				customPage = bckProp.Provider != apc.Azure

				tlog.Logf("Cleaning up the remote bucket %s\n", bck)
				lst, err := api.ListObjects(baseParams, bck, nil, api.ListArgs{})
				tassert.CheckFatal(t, err)
				for _, en := range lst.Entries {
					err := tools.Del(proxyURL, bck, en.Name, nil, nil, false /*silent*/)
					tassert.CheckFatal(t, err)
				}
			} else {
				bck = cmn.Bck{Name: testBucketName, Provider: provider}
				tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)
			}

			objNames := make([]string, 0, objCnt)

			t.Cleanup(func() {
				for _, objName := range objNames {
					err := tools.Del(proxyURL, bck, objName, nil, nil, true /*silent*/)
					tassert.CheckError(t, err)
				}
			})

			for i := range objCnt {
				objName := fmt.Sprintf("prefix/obj%d", i+1)
				objNames = append(objNames, objName)

				r, _ := readers.NewRand(fileSize, cos.ChecksumNone)
				_, err := api.PutObject(&api.PutArgs{
					BaseParams: baseParams,
					Bck:        bck,
					ObjName:    objName,
					Reader:     r,
					Size:       fileSize,
				})
				tassert.CheckFatal(t, err)
			}

			tests := []struct {
				name     string
				prefix   string
				pageSize int64
				limit    int64
				expected int
			}{
				{
					"full_list_default_pageSize_no_limit",
					"prefix", 0, 0,
					objCnt,
				},
				{
					"full_list_small_pageSize_no_limit",
					"prefix", objCnt / 7, 0,
					objCnt,
				},
				{
					"full_list_limited",
					"prefix", 0, 8,
					8,
				},
				{
					"full_list_prefixed",
					"prefix/obj1", 0, 0,
					11, // obj1 and obj10..obj19
				},
				{
					"full_list_overlimited_prefixed",
					"prefix/obj1", 0, 20,
					11, // obj1 and obj10..obj19
				},
				{
					"full_list_limited_prefixed",
					"prefix/obj1", 0, 2,
					2, // obj1 and obj10
				},
				{
					"empty_list_prefixed",
					"prefix/nothing", 0, 0,
					0,
				},
			}

			for _, test := range tests {
				if test.pageSize != 0 && !customPage {
					tlog.Logf("Bucket %s does not support custom paging, skipping...\n", bck)
					continue
				}
				t.Run(test.name, func(t *testing.T) {
					tlog.Logf("Prefix: %q, Expected objects: %d\n", test.prefix, test.expected)
					msg := &apc.LsoMsg{PageSize: test.pageSize, Prefix: test.prefix}
					tlog.Logf(
						"list_objects %s [prefix: %q, page_size: %d]\n",
						bck, msg.Prefix, msg.PageSize,
					)

					lst, err := api.ListObjects(baseParams, bck, msg, api.ListArgs{Limit: test.limit})
					tassert.CheckFatal(t, err)

					tlog.Logf("list_objects output: %d objects\n", len(lst.Entries))

					if len(lst.Entries) != test.expected {
						t.Errorf("returned %d objects instead of %d", len(lst.Entries), test.expected)
					}
				})
			}
		})
	}
}

func TestListObjectsCache(t *testing.T) {
	var (
		baseParams = tools.BaseAPIParams()
		m          = ioContext{
			t:        t,
			num:      rand.Intn(3000) + 1481,
			fileSize: cos.KiB,
		}
		totalIters = 10
	)

	if testing.Short() {
		m.num = 250 + rand.Intn(500)
		totalIters = 5
	}

	m.init(true /*cleanup*/)

	tools.CreateBucket(t, m.proxyURL, m.bck, nil, true /*cleanup*/)
	m.puts()

	for _, useCache := range []bool{true, false} {
		t.Run(fmt.Sprintf("cache=%t", useCache), func(t *testing.T) {
			// Do it N times - first: fill the cache; next calls: use it.
			for iter := range totalIters {
				var (
					started = time.Now()
					msg     = &apc.LsoMsg{PageSize: rand.Int63n(20) + 4}
				)
				if useCache {
					msg.SetFlag(apc.UseListObjsCache)
				}
				objList, err := api.ListObjects(baseParams, m.bck, msg, api.ListArgs{})
				tassert.CheckFatal(t, err)

				tlog.Logf(
					"[iter: %d] cache: %5t, page_size: %d, time: %s\n",
					iter, useCache, msg.PageSize, time.Since(started),
				)

				tassert.Errorf(
					t, len(objList.Entries) == m.num,
					"unexpected number of entries (got: %d, expected: %d)", len(objList.Entries), m.num,
				)
			}

			if useCache {
				err := api.ListObjectsInvalidateCache(baseParams, m.bck)
				tassert.CheckError(t, err)
			}
		})
	}
}

func TestListObjectsWithRebalance(t *testing.T) {
	tools.CheckSkip(t, &tools.SkipTestArgs{Long: true})

	var (
		baseParams = tools.BaseAPIParams()
		wg         = &sync.WaitGroup{}
		m          = ioContext{
			t:        t,
			num:      10000,
			fileSize: 128,
		}
		rebID string
	)

	m.initAndSaveState(true /*cleanup*/)
	m.expectTargets(2)

	tools.CreateBucket(t, m.proxyURL, m.bck, nil, true /*cleanup*/)

	target := m.startMaintenanceNoRebalance()

	m.puts()

	wg.Add(1)
	go func() {
		defer wg.Done()
		rebID = m.stopMaintenance(target)
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := range 15 {
			tlog.Logf("listing all objects, iter: %d\n", i)
			lst, err := api.ListObjects(baseParams, m.bck, nil, api.ListArgs{})
			tassert.CheckFatal(t, err)
			if lst.Flags == 0 {
				tassert.Errorf(t, len(lst.Entries) == m.num, "entries mismatch (%d vs %d)", len(lst.Entries), m.num)
			} else if len(lst.Entries) != m.num {
				tlog.Logf("List objects while rebalancing: %d vs %d\n", len(lst.Entries), m.num)
			}

			time.Sleep(time.Second)
		}
	}()

	wg.Wait()
	m.waitAndCheckCluState()
	tools.WaitForRebalanceByID(t, baseParams, rebID)
}

func TestBucketSingleProp(t *testing.T) {
	const (
		dataSlices   = 1
		paritySlices = 1
		objLimit     = 300 * cos.KiB
		burst        = 15
	)
	var (
		m = ioContext{
			t: t,
		}
		baseParams = tools.BaseAPIParams()
	)

	m.initAndSaveState(true /*cleanup*/)
	m.expectTargets(3)

	tools.CreateBucket(t, m.proxyURL, m.bck, nil, true /*cleanup*/)

	tlog.Logf("Changing bucket %q properties...\n", m.bck)

	// Enabling EC should set default value for number of slices if it is 0
	_, err := api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
		EC: &cmn.ECConfToSet{Enabled: apc.Ptr(true)},
	})
	tassert.CheckError(t, err)
	p, err := api.HeadBucket(baseParams, m.bck, true /* don't add */)
	tassert.CheckFatal(t, err)
	if !p.EC.Enabled {
		t.Error("EC was not enabled")
	}
	if p.EC.DataSlices != 1 {
		t.Errorf("Number of data slices is incorrect: %d (expected 1)", p.EC.DataSlices)
	}
	if p.EC.ParitySlices != 1 {
		t.Errorf("Number of parity slices is incorrect: %d (expected 1)", p.EC.ParitySlices)
	}

	// Need to disable EC first
	_, err = api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
		EC: &cmn.ECConfToSet{Enabled: apc.Ptr(false)},
	})
	tassert.CheckError(t, err)

	// Enabling mirroring should set default value for number of copies if it is 0
	_, err = api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
		Mirror: &cmn.MirrorConfToSet{Enabled: apc.Ptr(true)},
	})
	tassert.CheckError(t, err)
	p, err = api.HeadBucket(baseParams, m.bck, true /* don't add */)
	tassert.CheckFatal(t, err)
	if !p.Mirror.Enabled {
		t.Error("Mirroring was not enabled")
	}
	if p.Mirror.Copies != 2 {
		t.Errorf("Number of copies is incorrect: %d (expected 2)", p.Mirror.Copies)
	}

	// Need to disable mirroring first
	_, err = api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
		Mirror: &cmn.MirrorConfToSet{Enabled: apc.Ptr(false)},
	})
	tassert.CheckError(t, err)

	// Change a few more bucket properties
	_, err = api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
		EC: &cmn.ECConfToSet{
			DataSlices:   apc.Ptr(dataSlices),
			ParitySlices: apc.Ptr(paritySlices),
			ObjSizeLimit: apc.Ptr[int64](objLimit),
		},
	})
	tassert.CheckError(t, err)

	// Enable EC again
	_, err = api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
		EC: &cmn.ECConfToSet{Enabled: apc.Ptr(true)},
	})
	tassert.CheckError(t, err)
	p, err = api.HeadBucket(baseParams, m.bck, true /* don't add */)
	tassert.CheckFatal(t, err)
	if p.EC.DataSlices != dataSlices {
		t.Errorf("Number of data slices was not changed to %d. Current value %d", dataSlices, p.EC.DataSlices)
	}
	if p.EC.ParitySlices != paritySlices {
		t.Errorf("Number of parity slices was not changed to %d. Current value %d", paritySlices, p.EC.ParitySlices)
	}
	if p.EC.ObjSizeLimit != objLimit {
		t.Errorf("Minimal EC object size was not changed to %d. Current value %d", objLimit, p.EC.ObjSizeLimit)
	}

	// Need to disable EC first
	_, err = api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
		EC: &cmn.ECConfToSet{Enabled: apc.Ptr(false)},
	})
	tassert.CheckError(t, err)

	// Change mirroring threshold
	_, err = api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
		Mirror: &cmn.MirrorConfToSet{Burst: apc.Ptr(burst)},
	},
	)
	tassert.CheckError(t, err)
	p, err = api.HeadBucket(baseParams, m.bck, true /* don't add */)
	tassert.CheckFatal(t, err)
	if p.Mirror.Burst != burst {
		t.Errorf("Mirror burst was not changed to %d. Current value %d", burst, p.Mirror.Burst)
	}

	// Disable mirroring
	_, err = api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
		Mirror: &cmn.MirrorConfToSet{Enabled: apc.Ptr(false)},
	})
	tassert.CheckError(t, err)
}

func TestSetBucketPropsOfNonexistentBucket(t *testing.T) {
	baseParams := tools.BaseAPIParams()
	bucket, err := tools.GenerateNonexistentBucketName(t.Name()+"Bucket", baseParams)
	tassert.CheckFatal(t, err)

	bck := cmn.Bck{
		Name:     bucket,
		Provider: cliBck.Provider,
	}

	_, err = api.SetBucketProps(baseParams, bck, &cmn.BpropsToSet{
		EC: &cmn.ECConfToSet{Enabled: apc.Ptr(true)},
	})
	if err == nil {
		t.Fatalf("Expected SetBucketProps error, but got none.")
	}

	status := api.HTTPStatus(err)
	if status < http.StatusBadRequest {
		t.Errorf("Expected status: %d, got %d", http.StatusNotFound, status)
	}
}

func TestSetAllBucketPropsOfNonexistentBucket(t *testing.T) {
	var (
		baseParams  = tools.BaseAPIParams()
		bucketProps = &cmn.BpropsToSet{}
	)

	bucket, err := tools.GenerateNonexistentBucketName(t.Name()+"Bucket", baseParams)
	tassert.CheckFatal(t, err)

	bck := cmn.Bck{
		Name:     bucket,
		Provider: cliBck.Provider,
	}

	_, err = api.SetBucketProps(baseParams, bck, bucketProps)
	if err == nil {
		t.Fatalf("Expected SetBucketProps error, but got none.")
	}

	status := api.HTTPStatus(err)
	if status < http.StatusBadRequest {
		t.Errorf("Expected status %d, got %d", http.StatusNotFound, status)
	}
}

func TestBucketInvalidName(t *testing.T) {
	var (
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)
	)

	invalidNames := []string{"*", ".", "", " ", "bucket and name", "bucket/name", "#name", "$name", "~name"}
	for _, name := range invalidNames {
		bck := cmn.Bck{
			Name:     name,
			Provider: apc.AIS,
		}
		if err := api.CreateBucket(baseParams, bck, nil); err == nil {
			tools.DestroyBucket(t, proxyURL, bck)
			t.Errorf("created bucket with invalid name %q", name)
		}
	}
}

func TestLocalMirror(t *testing.T) {
	tests := []struct {
		numCopies []int // each of the number in the list represents the number of copies enforced on the bucket
		tag       string
		skipArgs  tools.SkipTestArgs
	}{
		// set number `copies = 1` - no copies should be created
		{numCopies: []int{1}, tag: "copies=1"},
		// set number `copies = 2` - one additional copy for each object should be created
		{numCopies: []int{2}, tag: "copies=2"},
		// first set number of copies to 2, then to 3
		{numCopies: []int{2, 3}, skipArgs: tools.SkipTestArgs{Long: true}, tag: "copies=2-then-3"},
	}

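	// Each case delegates to testLocalMirror, which requires at least
	// cos.Max(numCopies...)+1 mountpaths (enforced via the MinMountpaths skip-check below).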
	for i := range tests {
		test := tests[i]
		t.Run(test.tag, func(t *testing.T) {
			tools.CheckSkip(t, &test.skipArgs)
			testLocalMirror(t, test.numCopies)
		})
	}
}

func testLocalMirror(t *testing.T, numCopies []int) {
	const xactTimeout = 10 * time.Second
	m := ioContext{
		t:               t,
		num:             10000,
		numGetsEachFile: 5,
		bck: cmn.Bck{
			Provider: apc.AIS,
			Name:     trand.String(10),
		},
	}

	if testing.Short() {
		m.num = 250
		m.numGetsEachFile = 3
	}

	m.initAndSaveState(true /*cleanup*/)

	skip := tools.SkipTestArgs{MinMountpaths: cos.Max(numCopies...) + 1}
	tools.CheckSkip(t, &skip)

	tools.CreateBucket(t, m.proxyURL, m.bck, nil, true /*cleanup*/)
	{
		baseParams := tools.BaseAPIParams()
		xid, err := api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
			Mirror: &cmn.MirrorConfToSet{
				Enabled: apc.Ptr(true),
			},
		})
		tassert.CheckFatal(t, err)

		p, err := api.HeadBucket(baseParams, m.bck, true /* don't add */)
		tassert.CheckFatal(t, err)
		tassert.Fatalf(t, p.Mirror.Copies == 2, "%d copies != 2", p.Mirror.Copies)

		// Even though the bucket is empty, it can take a short while until the
		// xaction is propagated and finished.
		reqArgs := xact.ArgsMsg{ID: xid, Kind: apc.ActMakeNCopies, Bck: m.bck, Timeout: xactTimeout}
		_, err = api.WaitForXactionIC(baseParams, &reqArgs)
		tassert.CheckFatal(t, err)
	}

	m.puts()

	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		m.gets(nil, false)
	}()

	baseParams := tools.BaseAPIParams(m.proxyURL)

	xargs := xact.ArgsMsg{Kind: apc.ActPutCopies, Bck: m.bck, Timeout: xactTimeout}
	_, _ = api.WaitForXactionIC(baseParams, &xargs)

	for _, copies := range numCopies {
		makeNCopies(t, baseParams, m.bck, copies)
	}

	// wait for all GETs to complete
	wg.Wait()

	m.ensureNumCopies(baseParams, numCopies[len(numCopies)-1], false /*greaterOk*/)
}

func makeNCopies(t *testing.T, baseParams api.BaseParams, bck cmn.Bck, ncopies int) {
	tlog.Logf("Set copies = %d\n", ncopies)

	xid, err := api.MakeNCopies(baseParams, bck, ncopies)
	tassert.CheckFatal(t, err)

	args := xact.ArgsMsg{ID: xid, Kind: apc.ActMakeNCopies}
	_, err = api.WaitForXactionIC(baseParams, &args)
	tassert.CheckFatal(t, err)

	args = xact.ArgsMsg{Kind: apc.ActPutCopies, Bck: bck}
	api.WaitForXactionIdle(baseParams, &args)
}

func TestRemoteBucketMirror(t *testing.T) {
	var (
		m = &ioContext{
			t:      t,
			num:    128,
			bck:    cliBck,
			prefix: t.Name(),
		}
		baseParams = tools.BaseAPIParams()
	)

	tools.CheckSkip(t, &tools.SkipTestArgs{RemoteBck: true, Bck: m.bck})

	m.init(true /*cleanup*/)
	m.remotePuts(true /*evict*/)

	// enable mirror
	_, err := api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
		Mirror: &cmn.MirrorConfToSet{Enabled: apc.Ptr(true)},
	})
	tassert.CheckFatal(t, err)
	defer api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
		Mirror: &cmn.MirrorConfToSet{Enabled: apc.Ptr(false)},
	})

	// list
	msg := &apc.LsoMsg{Prefix: m.prefix, Props: apc.GetPropsName}
	objectList, err := api.ListObjects(baseParams, m.bck, msg, api.ListArgs{})
	tassert.CheckFatal(t, err)
	tassert.Fatalf(
		t, len(objectList.Entries) == m.num,
		"wrong number of objects in the remote bucket %s: need %d, got %d",
		m.bck, m.num, len(objectList.Entries),
	)

	tools.CheckSkip(t, &tools.SkipTestArgs{MinMountpaths: 4})

	// cold GET - causes local mirroring
	m.remotePrefetch(m.num)
	m.ensureNumCopies(baseParams, 2, false /*greaterOk*/)
	time.Sleep(3 * time.Second)

	// Increase number of copies
	makeNCopies(t, baseParams, m.bck, 3)
	m.ensureNumCopies(baseParams, 3, false /*greaterOk*/)
}

func TestBucketReadOnly(t *testing.T) {
	m := ioContext{
		t:               t,
		num:             10,
		numGetsEachFile: 2,
	}
	m.init(true /*cleanup*/)
	tools.CreateBucket(t, m.proxyURL, m.bck, nil, true /*cleanup*/)
	baseParams := tools.BaseAPIParams()

	m.puts()
	m.gets(nil, false)

	p, err := api.HeadBucket(baseParams, m.bck, true /* don't add */)
	tassert.CheckFatal(t, err)

	// make bucket read-only
	// NOTE: must allow PATCH - otherwise api.SetBucketProps a few lines down below won't work
	aattrs := apc.AccessRO | apc.AcePATCH
	_, err = api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{Access: apc.Ptr(aattrs)})
	tassert.CheckFatal(t, err)

	m.init(true /*cleanup*/)
	m.puts(true /*ignoreErr*/)
	tassert.Fatalf(t, m.numPutErrs == m.num, "num failed PUTs %d, expecting %d", m.numPutErrs, m.num)

	// restore write access
	_, err = api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{Access: apc.Ptr(p.Access)})
	tassert.CheckFatal(t, err)

	// write some more and destroy
	m.init(true /*cleanup*/)
	m.puts(true /*ignoreErr*/)
	tassert.Fatalf(t, m.numPutErrs == 0, "num failed PUTs %d, expecting 0 (zero)", m.numPutErrs)
}

func TestRenameBucketEmpty(t *testing.T) {
	tools.CheckSkip(t, &tools.SkipTestArgs{Long: true})
	var (
		m = ioContext{
			t: t,
		}
		baseParams = tools.BaseAPIParams()
		dstBck     = cmn.Bck{
			Name:     testBucketName + "_new",
			Provider: apc.AIS,
		}
	)

	m.initAndSaveState(true /*cleanup*/)
	m.expectTargets(1)

	srcBck := m.bck
	tools.CreateBucket(t, m.proxyURL, srcBck, nil, true /*cleanup*/)
	defer func() {
		tools.DestroyBucket(t, m.proxyURL, dstBck)
	}()
	tools.DestroyBucket(t, m.proxyURL, dstBck)

	m.setNonDefaultBucketProps()
	srcProps, err := api.HeadBucket(baseParams, srcBck, true /* don't add */)
	tassert.CheckFatal(t, err)

	// Rename it
	tlog.Logf("rename %s => %s\n", srcBck, dstBck)
	uuid, err := api.RenameBucket(baseParams, srcBck, dstBck)
	tassert.CheckFatal(t, err)

	args := xact.ArgsMsg{ID: uuid, Kind: apc.ActMoveBck, Timeout: tools.RebalanceTimeout}
	_, err = api.WaitForXactionIC(baseParams, &args)
	tassert.CheckFatal(t, err)

	// Check if the new bucket appears in the list
	bcks, err := api.ListBuckets(baseParams, cmn.QueryBcks{Provider: apc.AIS}, apc.FltPresent)
	tassert.CheckFatal(t, err)

	if !tools.BucketsContain(bcks, cmn.QueryBcks(dstBck)) {
		t.Error("new bucket not found in buckets list")
	}

	tlog.Logln("checking bucket props...")
	dstProps, err := api.HeadBucket(baseParams, dstBck, true /* don't add */)
	tassert.CheckFatal(t, err)
	if !srcProps.Equal(dstProps) {
		t.Fatalf("source and destination bucket props do not match: %v - %v", srcProps, dstProps)
	}
}

func TestRenameBucketNonEmpty(t *testing.T) {
&tools.SkipTestArgs{Long: true}) 1985 var ( 1986 m = ioContext{ 1987 t: t, 1988 num: 1000, 1989 numGetsEachFile: 2, 1990 } 1991 baseParams = tools.BaseAPIParams() 1992 dstBck = cmn.Bck{ 1993 Name: testBucketName + "_new", 1994 Provider: apc.AIS, 1995 } 1996 ) 1997 1998 m.initAndSaveState(true /*cleanup*/) 1999 m.proxyURL = tools.RandomProxyURL(t) 2000 m.expectTargets(1) 2001 2002 srcBck := m.bck 2003 tools.CreateBucket(t, m.proxyURL, srcBck, nil, true /*cleanup*/) 2004 defer func() { 2005 // This bucket should be present. 2006 tools.DestroyBucket(t, m.proxyURL, dstBck) 2007 }() 2008 tools.DestroyBucket(t, m.proxyURL, dstBck) 2009 2010 m.setNonDefaultBucketProps() 2011 srcProps, err := api.HeadBucket(baseParams, srcBck, true /* don't add */) 2012 tassert.CheckFatal(t, err) 2013 2014 // Put some files 2015 m.puts() 2016 2017 // Rename it 2018 tlog.Logf("rename %s => %s\n", srcBck, dstBck) 2019 m.bck = dstBck 2020 xid, err := api.RenameBucket(baseParams, srcBck, dstBck) 2021 if err != nil && ensurePrevRebalanceIsFinished(baseParams, err) { 2022 // can retry 2023 xid, err = api.RenameBucket(baseParams, srcBck, dstBck) 2024 } 2025 2026 tassert.CheckFatal(t, err) 2027 2028 args := xact.ArgsMsg{ID: xid, Kind: apc.ActMoveBck, Timeout: tools.RebalanceTimeout} 2029 _, err = api.WaitForXactionIC(baseParams, &args) 2030 tassert.CheckFatal(t, err) 2031 2032 // Gets on renamed ais bucket 2033 m.gets(nil, false) 2034 m.ensureNoGetErrors() 2035 2036 tlog.Logln("checking bucket props...") 2037 dstProps, err := api.HeadBucket(baseParams, dstBck, true /* don't add */) 2038 tassert.CheckFatal(t, err) 2039 if !srcProps.Equal(dstProps) { 2040 t.Fatalf("source and destination bucket props do not match: %v - %v", srcProps, dstProps) 2041 } 2042 } 2043 2044 func TestRenameBucketAlreadyExistingDst(t *testing.T) { 2045 var ( 2046 m = ioContext{ 2047 t: t, 2048 } 2049 baseParams = tools.BaseAPIParams() 2050 tmpBck = cmn.Bck{ 2051 Name: "tmp_bck_name", 2052 Provider: apc.AIS, 2053 } 2054 ) 2055 2056 m.initAndSaveState(true /*cleanup*/) 2057 m.expectTargets(1) 2058 2059 tools.CreateBucket(t, m.proxyURL, m.bck, nil, true /*cleanup*/) 2060 2061 m.setNonDefaultBucketProps() 2062 srcProps, err := api.HeadBucket(baseParams, m.bck, true /* don't add */) 2063 tassert.CheckFatal(t, err) 2064 2065 tools.CreateBucket(t, m.proxyURL, tmpBck, nil, true /*cleanup*/) 2066 2067 // rename 2068 tlog.Logf("try rename %s => %s (that already exists)\n", m.bck, tmpBck) 2069 if _, err := api.RenameBucket(baseParams, m.bck, tmpBck); err == nil { 2070 t.Fatal("expected an error renaming already existing bucket") 2071 } 2072 2073 bcks, err := api.ListBuckets(baseParams, cmn.QueryBcks{Provider: apc.AIS}, apc.FltPresent) 2074 tassert.CheckFatal(t, err) 2075 2076 if !tools.BucketsContain(bcks, cmn.QueryBcks(m.bck)) || !tools.BucketsContain(bcks, cmn.QueryBcks(tmpBck)) { 2077 t.Errorf("one of the buckets (%s, %s) was not found in the list %+v", m.bck, tmpBck, bcks) 2078 } 2079 2080 dstProps, err := api.HeadBucket(baseParams, tmpBck, true /* don't add */) 2081 tassert.CheckFatal(t, err) 2082 2083 if srcProps.Equal(dstProps) { 2084 t.Fatalf("source and destination props (checksums, in particular) are not expected to match: %v vs %v", 2085 srcProps.Cksum, dstProps.Cksum) 2086 } 2087 } 2088 2089 // Tries to rename same source bucket to two destination buckets - the second should fail. 
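// (Bucket rename runs as an asynchronous xaction (apc.ActMoveBck): while the first rename is
// still in progress, any further rename of the same source is expected to be rejected, and the
// source bucket must eventually disappear from the bucket listing.)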
2090 func TestRenameBucketTwice(t *testing.T) { 2091 tools.CheckSkip(t, &tools.SkipTestArgs{Long: true}) 2092 var ( 2093 m = ioContext{ 2094 t: t, 2095 num: 500, 2096 } 2097 baseParams = tools.BaseAPIParams() 2098 dstBck1 = cmn.Bck{ 2099 Name: testBucketName + "_new1", 2100 Provider: apc.AIS, 2101 } 2102 dstBck2 = cmn.Bck{ 2103 Name: testBucketName + "_new2", 2104 Provider: apc.AIS, 2105 } 2106 ) 2107 2108 m.initAndSaveState(true /*cleanup*/) 2109 m.proxyURL = tools.RandomProxyURL(t) 2110 m.expectTargets(1) 2111 2112 srcBck := m.bck 2113 tools.CreateBucket(t, m.proxyURL, srcBck, nil, true /*cleanup*/) 2114 defer func() { 2115 // This bucket should not be present (thus ignoring error) but 2116 // try to delete in case something failed. 2117 api.DestroyBucket(baseParams, dstBck2) 2118 // This one should be present. 2119 tools.DestroyBucket(t, m.proxyURL, dstBck1) 2120 }() 2121 2122 m.puts() 2123 2124 // Rename to first destination 2125 tlog.Logf("rename %s => %s\n", srcBck, dstBck1) 2126 xid, err := api.RenameBucket(baseParams, srcBck, dstBck1) 2127 if err != nil && ensurePrevRebalanceIsFinished(baseParams, err) { 2128 // can retry 2129 xid, err = api.RenameBucket(baseParams, srcBck, dstBck1) 2130 } 2131 tassert.CheckFatal(t, err) 2132 2133 // Try to rename to first destination again - already in progress 2134 tlog.Logf("try renaming %s => %s\n", srcBck, dstBck1) 2135 _, err = api.RenameBucket(baseParams, srcBck, dstBck1) 2136 if err == nil { 2137 t.Error("multiple rename operations on same bucket should fail") 2138 } 2139 2140 // Try to rename to second destination - this should fail 2141 tlog.Logf("try rename %s => %s\n", srcBck, dstBck2) 2142 _, err = api.RenameBucket(baseParams, srcBck, dstBck2) 2143 if err == nil { 2144 t.Error("multiple rename operations on same bucket should fail") 2145 } 2146 2147 // Wait for rename to complete 2148 args := xact.ArgsMsg{ID: xid, Kind: apc.ActMoveBck, Timeout: tools.RebalanceTimeout} 2149 _, err = api.WaitForXactionIC(baseParams, &args) 2150 tassert.CheckFatal(t, err) 2151 2152 // Check if the new bucket appears in the list 2153 bcks, err := api.ListBuckets(baseParams, cmn.QueryBcks{Provider: apc.AIS}, apc.FltPresent) 2154 tassert.CheckFatal(t, err) 2155 2156 if tools.BucketsContain(bcks, cmn.QueryBcks(srcBck)) { 2157 t.Error("source bucket found in buckets list") 2158 } 2159 if !tools.BucketsContain(bcks, cmn.QueryBcks(dstBck1)) { 2160 t.Error("destination bucket not found in buckets list") 2161 } 2162 if tools.BucketsContain(bcks, cmn.QueryBcks(dstBck2)) { 2163 t.Error("second (failed) destination bucket found in buckets list") 2164 } 2165 } 2166 2167 func TestRenameBucketNonExistentSrc(t *testing.T) { 2168 var ( 2169 m = ioContext{ 2170 t: t, 2171 } 2172 baseParams = tools.BaseAPIParams() 2173 dstBck = cmn.Bck{ 2174 Name: trand.String(10), 2175 Provider: apc.AIS, 2176 } 2177 srcBcks = []cmn.Bck{ 2178 { 2179 Name: trand.String(10), 2180 Provider: apc.AIS, 2181 }, 2182 { 2183 Name: trand.String(10), 2184 Provider: apc.AWS, 2185 }, 2186 } 2187 ) 2188 2189 m.initAndSaveState(true /*cleanup*/) 2190 m.expectTargets(1) 2191 2192 for _, srcBck := range srcBcks { 2193 _, err := api.RenameBucket(baseParams, srcBck, dstBck) 2194 tools.CheckErrIsNotFound(t, err) 2195 _, err = api.HeadBucket(baseParams, dstBck, true /* don't add */) 2196 tools.CheckErrIsNotFound(t, err) 2197 } 2198 } 2199 2200 func TestRenameBucketWithBackend(t *testing.T) { 2201 tools.CheckSkip(t, &tools.SkipTestArgs{CloudBck: true, Bck: cliBck}) 2202 2203 var ( 2204 proxyURL = 
tools.RandomProxyURL() 2205 baseParams = tools.BaseAPIParams(proxyURL) 2206 bck = cmn.Bck{ 2207 Name: "renamesrc", 2208 Provider: apc.AIS, 2209 } 2210 dstBck = cmn.Bck{ 2211 Name: "bucketname", 2212 Provider: apc.AIS, 2213 } 2214 ) 2215 2216 tools.CreateBucket(t, proxyURL, bck, 2217 &cmn.BpropsToSet{BackendBck: &cmn.BackendBckToSet{ 2218 Name: apc.Ptr(cliBck.Name), 2219 Provider: apc.Ptr(cliBck.Provider), 2220 }}, true /*cleanup*/) 2221 t.Cleanup(func() { 2222 tools.DestroyBucket(t, proxyURL, dstBck) 2223 }) 2224 2225 srcProps, err := api.HeadBucket(baseParams, bck, true /* don't add */) 2226 tassert.CheckFatal(t, err) 2227 2228 xid, err := api.RenameBucket(baseParams, bck, dstBck) 2229 if err != nil && ensurePrevRebalanceIsFinished(baseParams, err) { 2230 // can retry 2231 xid, err = api.RenameBucket(baseParams, bck, dstBck) 2232 } 2233 2234 tassert.CheckFatal(t, err) 2235 xargs := xact.ArgsMsg{ID: xid} 2236 _, err = api.WaitForXactionIC(baseParams, &xargs) 2237 tassert.CheckFatal(t, err) 2238 2239 exists, err := api.QueryBuckets(baseParams, cmn.QueryBcks(bck), apc.FltPresent) 2240 tassert.CheckFatal(t, err) 2241 tassert.Errorf(t, !exists, "source bucket shouldn't exist") 2242 2243 tlog.Logln("checking bucket props...") 2244 dstProps, err := api.HeadBucket(baseParams, dstBck, true /* don't add */) 2245 tassert.CheckFatal(t, err) 2246 2247 tassert.Fatalf( 2248 t, srcProps.Versioning.Enabled == dstProps.Versioning.Enabled, 2249 "source and destination bucket versioning does not match: %t vs. %t, respectively", 2250 srcProps.Versioning.Enabled, dstProps.Versioning.Enabled, 2251 ) 2252 2253 // AWS region might be set upon rename. 2254 srcProps.Extra.AWS.CloudRegion = "" 2255 dstProps.Extra.AWS.CloudRegion = "" 2256 2257 tassert.Fatalf(t, srcProps.Equal(dstProps), "source and destination bucket props do not match:\n%v\n%v", srcProps, dstProps) 2258 } 2259 2260 func TestCopyBucket(t *testing.T) { 2261 tests := []struct { 2262 srcRemote bool 2263 dstRemote bool 2264 dstBckExist bool // determines if destination bucket exists before copy or not 2265 dstBckHasObjects bool // determines if destination bucket contains any objects before copy or not 2266 multipleDests bool // determines if there are multiple destinations to which objects are copied 2267 onlyLong bool 2268 evictRemoteSrc bool 2269 }{ 2270 // ais -> ais 2271 {srcRemote: false, dstRemote: false, dstBckExist: false, dstBckHasObjects: false, multipleDests: false}, 2272 {srcRemote: false, dstRemote: false, dstBckExist: true, dstBckHasObjects: false, multipleDests: false, onlyLong: true}, 2273 {srcRemote: false, dstRemote: false, dstBckExist: true, dstBckHasObjects: true, multipleDests: false, onlyLong: true}, 2274 {srcRemote: false, dstRemote: false, dstBckExist: false, dstBckHasObjects: false, multipleDests: true, onlyLong: true}, 2275 {srcRemote: false, dstRemote: false, dstBckExist: true, dstBckHasObjects: true, multipleDests: true, onlyLong: true}, 2276 2277 // remote -> ais 2278 {srcRemote: true, dstRemote: false, dstBckExist: false, dstBckHasObjects: false}, 2279 {srcRemote: true, dstRemote: false, dstBckExist: true, dstBckHasObjects: false}, 2280 {srcRemote: true, dstRemote: false, dstBckExist: true, dstBckHasObjects: true}, 2281 {srcRemote: true, dstRemote: false, dstBckExist: false, dstBckHasObjects: false, multipleDests: true}, 2282 {srcRemote: true, dstRemote: false, dstBckExist: true, dstBckHasObjects: true, multipleDests: true}, 2283 2284 // evicted remote -> ais 2285 {srcRemote: true, dstRemote: false, dstBckExist: false, 
dstBckHasObjects: false, evictRemoteSrc: true}, 2286 {srcRemote: true, dstRemote: false, dstBckExist: true, dstBckHasObjects: false, evictRemoteSrc: true}, 2287 2288 // ais -> remote 2289 {srcRemote: false, dstRemote: true, dstBckExist: true, dstBckHasObjects: false}, 2290 } 2291 2292 for _, test := range tests { 2293 // Bucket must exist when we require it to have objects. 2294 cos.Assert(test.dstBckExist || !test.dstBckHasObjects) 2295 2296 // in integration tests, we only have 1 remote bucket (cliBck) 2297 // (TODO: add remote -> remote) 2298 cos.Assert(!test.srcRemote || !test.dstRemote) 2299 2300 testName := fmt.Sprintf("src-remote=%t/dst-remote=%t/", test.srcRemote, test.dstRemote) 2301 if test.evictRemoteSrc { 2302 cos.Assert(test.srcRemote) 2303 testName = fmt.Sprintf("src-remote-evicted/dst-remote=%t/", test.dstRemote) 2304 } 2305 if test.dstBckExist { 2306 testName += "dst-present/" 2307 if test.dstBckHasObjects { 2308 testName += "with_objs" 2309 } else { 2310 testName += "without_objs" 2311 } 2312 } else { 2313 testName += "dst-absent" 2314 } 2315 if test.multipleDests { 2316 testName += "/multiple_dests" 2317 } 2318 2319 t.Run(testName, func(t *testing.T) { 2320 tools.CheckSkip(t, &tools.SkipTestArgs{Long: test.onlyLong}) 2321 var ( 2322 srcBckList *cmn.LsoRes 2323 2324 objCnt = 100 2325 srcm = &ioContext{ 2326 t: t, 2327 num: objCnt, 2328 bck: cmn.Bck{ 2329 Name: "src_copy_bck", 2330 Provider: apc.AIS, 2331 }, 2332 } 2333 dstms = []*ioContext{ 2334 { 2335 t: t, 2336 num: objCnt, 2337 bck: cmn.Bck{ 2338 Name: "dst_copy_bck_1", 2339 Provider: apc.AIS, 2340 }, 2341 }, 2342 } 2343 baseParams = tools.BaseAPIParams() 2344 ) 2345 tools.DestroyBucket(t, proxyURL, srcm.bck) 2346 tools.DestroyBucket(t, proxyURL, dstms[0].bck) 2347 2348 if test.multipleDests { 2349 dstms = append(dstms, &ioContext{ 2350 t: t, 2351 num: objCnt, 2352 bck: cmn.Bck{ 2353 Name: "dst_copy_bck_2", 2354 Provider: apc.AIS, 2355 }, 2356 }) 2357 tools.DestroyBucket(t, proxyURL, dstms[1].bck) 2358 } 2359 bckTest := cmn.Bck{Provider: apc.AIS, Ns: cmn.NsGlobal} 2360 if test.srcRemote { 2361 srcm.bck = cliBck 2362 srcm.deleteRemoteBckObjs = true 2363 bckTest.Provider = cliBck.Provider 2364 tools.CheckSkip(t, &tools.SkipTestArgs{RemoteBck: true, Bck: srcm.bck}) 2365 } 2366 if test.dstRemote { 2367 dstms = []*ioContext{ 2368 { 2369 t: t, 2370 num: 0, // Make sure to not put anything new to destination remote bucket 2371 bck: cliBck, 2372 }, 2373 } 2374 tools.CheckSkip(t, &tools.SkipTestArgs{RemoteBck: true, Bck: dstms[0].bck}) 2375 } 2376 2377 srcm.initAndSaveState(true /*cleanup*/) 2378 srcm.expectTargets(1) 2379 2380 for _, dstm := range dstms { 2381 dstm.init(true /*cleanup*/) 2382 } 2383 2384 if bckTest.IsAIS() { 2385 tools.CreateBucket(t, srcm.proxyURL, srcm.bck, nil, true) 2386 srcm.setNonDefaultBucketProps() 2387 } 2388 2389 if test.dstBckExist { 2390 for _, dstm := range dstms { 2391 if !dstm.bck.IsRemote() { 2392 tools.CreateBucket(t, dstm.proxyURL, dstm.bck, nil, true /*cleanup*/) 2393 } 2394 } 2395 } else { // cleanup 2396 for _, dstm := range dstms { 2397 if !dstm.bck.IsRemote() { 2398 t.Cleanup(func() { 2399 tools.DestroyBucket(t, dstm.proxyURL, dstm.bck) 2400 }) 2401 } 2402 } 2403 } 2404 2405 srcProps, err := api.HeadBucket(baseParams, srcm.bck, true /* don't add */) 2406 tassert.CheckFatal(t, err) 2407 2408 if test.dstBckHasObjects { 2409 for _, dstm := range dstms { 2410 // Don't make PUTs to remote bucket 2411 if !dstm.bck.IsRemote() { 2412 dstm.puts() 2413 } 2414 } 2415 } 2416 2417 if bckTest.IsAIS() { 
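// ais source: PUT the objects and remember the source listing (srcBckList)
// for the per-object comparison at the end of the test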
2418 srcm.puts() 2419 2420 srcBckList, err = api.ListObjects(baseParams, srcm.bck, nil, api.ListArgs{}) 2421 tassert.CheckFatal(t, err) 2422 } else if bckTest.IsRemote() { 2423 srcm.remotePuts(false /*evict*/) 2424 srcBckList, err = api.ListObjects(baseParams, srcm.bck, nil, api.ListArgs{}) 2425 tassert.CheckFatal(t, err) 2426 if test.evictRemoteSrc { 2427 tlog.Logf("evicting %s\n", srcm.bck) 2428 // 2429 // evict all _cached_ data from the "local" cluster 2430 // keep the src bucket in the "local" BMD though 2431 // 2432 err := api.EvictRemoteBucket(baseParams, srcm.bck, true /*keep empty src bucket in the BMD*/) 2433 tassert.CheckFatal(t, err) 2434 } 2435 defer srcm.del() 2436 } else { 2437 panic(bckTest) 2438 } 2439 2440 xactIDs := make([]string, 0, len(dstms)) 2441 for _, dstm := range dstms { 2442 var ( 2443 uuid string 2444 err error 2445 cmsg = &apc.CopyBckMsg{Force: true} 2446 ) 2447 if test.evictRemoteSrc { 2448 uuid, err = api.CopyBucket(baseParams, srcm.bck, dstm.bck, cmsg, apc.FltExists) 2449 } else { 2450 uuid, err = api.CopyBucket(baseParams, srcm.bck, dstm.bck, cmsg) 2451 } 2452 tassert.CheckFatal(t, err) 2453 tlog.Logf("copying %s => %s: %s\n", srcm.bck, dstm.bck, uuid) 2454 if uuids := strings.Split(uuid, xact.SepaID); len(uuids) > 1 { 2455 for _, u := range uuids { 2456 tassert.Fatalf(t, xact.IsValidUUID(u), "invalid UUID %q", u) 2457 } 2458 xactIDs = append(xactIDs, uuids...) 2459 } else { 2460 tassert.Fatalf(t, xact.IsValidUUID(uuid), "invalid UUID %q", uuid) 2461 xactIDs = append(xactIDs, uuid) 2462 } 2463 } 2464 2465 for _, uuid := range xactIDs { 2466 // TODO -- FIXME: remove/simplify-out this `if` here and elsewhere 2467 if test.evictRemoteSrc { 2468 // wait for TCO idle (different x-kind) 2469 args := xact.ArgsMsg{ID: uuid, Timeout: tools.CopyBucketTimeout} 2470 err := api.WaitForXactionIdle(baseParams, &args) 2471 tassert.CheckFatal(t, err) 2472 } else { 2473 args := xact.ArgsMsg{ID: uuid, Kind: apc.ActCopyBck, Timeout: tools.CopyBucketTimeout} 2474 _, err := api.WaitForXactionIC(baseParams, &args) 2475 tassert.CheckFatal(t, err) 2476 } 2477 } 2478 2479 for _, dstm := range dstms { 2480 if dstm.bck.IsRemote() { 2481 continue 2482 } 2483 2484 tlog.Logf("checking and comparing bucket %s props\n", dstm.bck) 2485 dstProps, err := api.HeadBucket(baseParams, dstm.bck, true /* don't add */) 2486 tassert.CheckFatal(t, err) 2487 2488 if dstProps.Provider != apc.AIS { 2489 t.Fatalf("destination bucket does not seem to be 'ais': %s", dstProps.Provider) 2490 } 2491 // Clear providers to compare the props across different ones 2492 srcProps.Provider = "" 2493 dstProps.Provider = "" 2494 2495 // If bucket existed before, ensure that the bucket props were **not** copied over. 2496 if test.dstBckExist && srcProps.Equal(dstProps) { 2497 t.Fatalf("source and destination bucket props match, even though they should not:\n%#v\n%#v", 2498 srcProps, dstProps) 2499 } 2500 2501 // When copying remote => ais we create the destination ais bucket on the fly 2502 // with the default props. In all other cases (including ais => ais) bucket props must match. 
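// (That is, copy-bucket never overwrites the properties of an existing destination, while a
// destination created on the fly from a remote source starts out with cluster-default props.)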
2503 if !test.dstBckExist { 2504 if test.srcRemote && !test.dstRemote { 2505 // TODO: validate default props 2506 } else if !srcProps.Equal(dstProps) { 2507 t.Fatalf("source and destination bucket props do not match:\n%#v\n%#v", 2508 srcProps, dstProps) 2509 } 2510 } 2511 } 2512 2513 for _, dstm := range dstms { 2514 tlog.Logf("checking and comparing objects of bucket %s\n", dstm.bck) 2515 expectedObjCount := srcm.num 2516 if test.dstBckHasObjects { 2517 expectedObjCount += dstm.num 2518 } 2519 2520 _, err := api.HeadBucket(baseParams, srcm.bck, true /* don't add */) 2521 tassert.CheckFatal(t, err) 2522 dstmProps, err := api.HeadBucket(baseParams, dstm.bck, true /* don't add */) 2523 tassert.CheckFatal(t, err) 2524 2525 msg := &apc.LsoMsg{} 2526 msg.AddProps(apc.GetPropsVersion) 2527 if test.dstRemote { 2528 msg.Flags = apc.LsObjCached 2529 } 2530 2531 dstBckList, err := api.ListObjects(baseParams, dstm.bck, msg, api.ListArgs{}) 2532 tassert.CheckFatal(t, err) 2533 if len(dstBckList.Entries) != expectedObjCount { 2534 t.Fatalf("list_objects: dst %s, cnt %d != %d cnt, src %s", 2535 dstm.bck.Cname(""), len(dstBckList.Entries), expectedObjCount, srcm.bck.Cname("")) 2536 } 2537 2538 tlog.Logf("verifying that %d copied objects have identical props\n", expectedObjCount) 2539 for _, a := range srcBckList.Entries { 2540 var found bool 2541 for _, b := range dstBckList.Entries { 2542 if a.Name == b.Name { 2543 found = true 2544 2545 if dstm.bck.IsRemote() && dstmProps.Versioning.Enabled { 2546 tassert.Fatalf(t, b.Version != "", 2547 "Expected non-empty object %q version", b.Name) 2548 } 2549 2550 break 2551 } 2552 } 2553 if !found { 2554 t.Fatalf("%s is missing in the destination bucket %s", srcm.bck.Cname(a.Name), dstm.bck.Cname("")) 2555 } 2556 } 2557 } 2558 }) 2559 } 2560 } 2561 2562 func TestCopyBucketSync(t *testing.T) { 2563 tools.CheckSkip(t, &tools.SkipTestArgs{ 2564 Long: true, 2565 RemoteBck: true, 2566 Bck: cliBck, 2567 RequiresRemoteCluster: true, // NOTE: utilizing remote cluster to simulate out-of-band delete 2568 }) 2569 var ( 2570 m = ioContext{ 2571 t: t, 2572 bck: cliBck, 2573 num: 500, 2574 fileSize: 128, 2575 prefix: trand.String(6) + "-", 2576 } 2577 baseParams = tools.BaseAPIParams() 2578 ) 2579 2580 m.init(true /*cleanup*/) 2581 2582 // 1. PUT(num-objs) => cliBck 2583 m.puts() 2584 tassert.Errorf(t, len(m.objNames) == m.num, "expected %d in the source bucket, got %d", m.num, len(m.objNames)) 2585 2586 tlog.Logf("list source %s objects\n", cliBck.Cname("")) 2587 msg := &apc.LsoMsg{Prefix: m.prefix, Flags: apc.LsObjCached} 2588 lst, err := api.ListObjects(baseParams, m.bck, msg, api.ListArgs{}) 2589 tassert.CheckFatal(t, err) 2590 tassert.Errorf(t, len(lst.Entries) == m.num, "expected %d present (cached) in the source bucket, got %d", m.num, len(lst.Entries)) 2591 2592 // 2. 
copy cliBck => dstBck 2593 dstBck := cmn.Bck{Name: "dst-" + cos.GenTie(), Provider: apc.AIS} 2594 tlog.Logf("first copy %s => %s\n", m.bck.Cname(""), dstBck.Cname("")) 2595 xid, err := api.CopyBucket(baseParams, m.bck, dstBck, &apc.CopyBckMsg{}) 2596 tassert.CheckFatal(t, err) 2597 t.Cleanup(func() { 2598 tools.DestroyBucket(t, proxyURL, dstBck) 2599 }) 2600 args := xact.ArgsMsg{ID: xid, Kind: apc.ActCopyBck, Timeout: time.Minute} 2601 _, err = api.WaitForXactionIC(baseParams, &args) 2602 tassert.CheckFatal(t, err) 2603 2604 tlog.Logf("list destination %s objects\n", dstBck.Cname("")) 2605 lst, err = api.ListObjects(baseParams, dstBck, msg, api.ListArgs{}) 2606 tassert.CheckFatal(t, err) 2607 tassert.Fatalf(t, len(lst.Entries) == m.num, "expected %d in the destination bucket, got %d", m.num, len(lst.Entries)) 2608 2609 // 3. select random 10% to delete 2610 num2del := max(m.num/10, 1) 2611 nam2del := make([]string, 0, num2del) 2612 strtpos := rand.Intn(m.num) 2613 for i := range num2del { 2614 pos := (strtpos + i*3) % m.num 2615 name := m.objNames[pos] 2616 for cos.StringInSlice(name, nam2del) { 2617 pos++ 2618 name = m.objNames[pos%m.num] 2619 } 2620 nam2del = append(nam2del, name) 2621 } 2622 2623 // 4. use remais to out-of-band delete nam2del... 2624 tlog.Logf("use remote cluster '%s' to out-of-band delete %d objects from %s (source)\n", 2625 tools.RemoteCluster.Alias, len(nam2del), m.bck.Cname("")) 2626 remoteBP := tools.BaseAPIParams(tools.RemoteCluster.URL) 2627 for _, name := range nam2del { 2628 err := api.DeleteObject(remoteBP, cliBck, name) 2629 tassert.CheckFatal(t, err) 2630 } 2631 2632 // 5. copy --sync (and note that prior to this step destination has all m.num) 2633 tlog.Logf("second copy %s => %s with '--sync' option\n", m.bck.Cname(""), dstBck.Cname("")) 2634 xid, err = api.CopyBucket(baseParams, m.bck, dstBck, &apc.CopyBckMsg{Sync: true}) 2635 tassert.CheckFatal(t, err) 2636 args.ID = xid 2637 _, err = api.WaitForXactionIC(baseParams, &args) 2638 tassert.CheckFatal(t, err) 2639 2640 tlog.Logf("list post-sync destination %s\n", dstBck.Cname("")) 2641 lst, err = api.ListObjects(baseParams, dstBck, msg, api.ListArgs{}) 2642 tassert.CheckFatal(t, err) 2643 tassert.Errorf(t, len(lst.Entries) == m.num-len(nam2del), "expected %d objects in the (sync-ed) destination, got %d", 2644 m.num-len(nam2del), len(lst.Entries)) 2645 } 2646 2647 func TestCopyBucketSimple(t *testing.T) { 2648 var ( 2649 srcBck = cmn.Bck{Name: "cpybck_src" + cos.GenTie(), Provider: apc.AIS} 2650 2651 m = &ioContext{ 2652 t: t, 2653 num: 500, // x 2 2654 fileSize: 512, 2655 fixedSize: true, 2656 bck: srcBck, 2657 } 2658 ) 2659 if testing.Short() { 2660 m.num /= 10 2661 } 2662 2663 tlog.Logf("Preparing source bucket %s\n", srcBck) 2664 tools.CreateBucket(t, proxyURL, srcBck, nil, true /*cleanup*/) 2665 m.initAndSaveState(true /*cleanup*/) 2666 2667 m.puts() 2668 m.prefix = "subdir/" 2669 m.puts() 2670 m.num *= 2 2671 2672 f := func() { 2673 list, err := api.ListObjects(baseParams, srcBck, nil, api.ListArgs{}) 2674 tassert.CheckFatal(t, err) 2675 tassert.Errorf(t, len(list.Entries) == m.num, "expected %d in the source bucket, got %d", m.num, len(list.Entries)) 2676 } 2677 2678 // pre-abort sleep 2679 sleep := time.Second 2680 if m.smap.CountActiveTs() == 1 { 2681 sleep = time.Millisecond 2682 } 2683 2684 t.Run("Stats", func(t *testing.T) { f(); testCopyBucketStats(t, srcBck, m) }) 2685 t.Run("Prepend", func(t *testing.T) { f(); testCopyBucketPrepend(t, srcBck, m) }) 2686 t.Run("Prefix", func(t *testing.T) { f(); 
testCopyBucketPrefix(t, srcBck, m, m.num/2) }) 2687 t.Run("Abort", func(t *testing.T) { f(); testCopyBucketAbort(t, srcBck, m, sleep) }) 2688 t.Run("DryRun", func(t *testing.T) { f(); testCopyBucketDryRun(t, srcBck, m) }) 2689 } 2690 2691 func testCopyBucketStats(t *testing.T, srcBck cmn.Bck, m *ioContext) { 2692 dstBck := cmn.Bck{Name: "cpybck_dst" + cos.GenTie(), Provider: apc.AIS} 2693 2694 xid, err := api.CopyBucket(baseParams, srcBck, dstBck, &apc.CopyBckMsg{Force: true}) 2695 tassert.CheckFatal(t, err) 2696 t.Cleanup(func() { 2697 tools.DestroyBucket(t, proxyURL, dstBck) 2698 }) 2699 2700 args := xact.ArgsMsg{ID: xid, Kind: apc.ActCopyBck, Timeout: time.Minute} 2701 _, err = api.WaitForXactionIC(baseParams, &args) 2702 tassert.CheckFatal(t, err) 2703 2704 snaps, err := api.QueryXactionSnaps(baseParams, &xact.ArgsMsg{ID: xid}) 2705 tassert.CheckFatal(t, err) 2706 objs, outObjs, inObjs := snaps.ObjCounts(xid) 2707 tassert.Errorf(t, objs == int64(m.num), "expected %d objects copied, got (objs=%d, outObjs=%d, inObjs=%d)", 2708 m.num, objs, outObjs, inObjs) 2709 if outObjs != inObjs { 2710 tlog.Logf("Warning: (sent objects) %d != %d (received objects)\n", outObjs, inObjs) 2711 } else { 2712 tlog.Logf("Num sent/received objects: %d\n", outObjs) 2713 } 2714 expectedBytesCnt := int64(m.fileSize * uint64(m.num)) 2715 locBytes, outBytes, inBytes := snaps.ByteCounts(xid) 2716 tassert.Errorf(t, locBytes == expectedBytesCnt, "expected %d bytes copied, got (bytes=%d, outBytes=%d, inBytes=%d)", 2717 expectedBytesCnt, locBytes, outBytes, inBytes) 2718 } 2719 2720 func testCopyBucketPrepend(t *testing.T, srcBck cmn.Bck, m *ioContext) { 2721 tools.CheckSkip(t, &tools.SkipTestArgs{Long: true}) 2722 var ( 2723 cpyPrefix = "cpy/virt" + trand.String(5) + "/" 2724 dstBck = cmn.Bck{Name: "cpybck_dst" + cos.GenTie(), Provider: apc.AIS} 2725 ) 2726 2727 xid, err := api.CopyBucket(baseParams, srcBck, dstBck, &apc.CopyBckMsg{Prepend: cpyPrefix}) 2728 tassert.CheckFatal(t, err) 2729 t.Cleanup(func() { 2730 tools.DestroyBucket(t, proxyURL, dstBck) 2731 }) 2732 2733 tlog.Logf("Waiting for x-%s[%s] %s => %s\n", apc.ActCopyBck, xid, srcBck, dstBck) 2734 args := xact.ArgsMsg{ID: xid, Kind: apc.ActCopyBck, Timeout: time.Minute} 2735 _, err = api.WaitForXactionIC(baseParams, &args) 2736 tassert.CheckFatal(t, err) 2737 2738 list, err := api.ListObjects(baseParams, dstBck, nil, api.ListArgs{}) 2739 tassert.CheckFatal(t, err) 2740 tassert.Errorf(t, len(list.Entries) == m.num, "expected %d to be copied, got %d", m.num, len(list.Entries)) 2741 for _, e := range list.Entries { 2742 tassert.Fatalf(t, strings.HasPrefix(e.Name, cpyPrefix), "expected %q to have prefix %q", e.Name, cpyPrefix) 2743 } 2744 } 2745 2746 func testCopyBucketPrefix(t *testing.T, srcBck cmn.Bck, m *ioContext, expected int) { 2747 tools.CheckSkip(t, &tools.SkipTestArgs{Long: true}) 2748 var ( 2749 dstBck = cmn.Bck{Name: "cpybck_dst" + cos.GenTie(), Provider: apc.AIS} 2750 ) 2751 2752 xid, err := api.CopyBucket(baseParams, srcBck, dstBck, &apc.CopyBckMsg{Prefix: m.prefix}) 2753 tassert.CheckFatal(t, err) 2754 t.Cleanup(func() { 2755 tools.DestroyBucket(t, proxyURL, dstBck) 2756 }) 2757 2758 tlog.Logf("Waiting for x-%s[%s] %s => %s\n", apc.ActCopyBck, xid, srcBck, dstBck) 2759 args := xact.ArgsMsg{ID: xid, Kind: apc.ActCopyBck, Timeout: time.Minute} 2760 _, err = api.WaitForXactionIC(baseParams, &args) 2761 tassert.CheckFatal(t, err) 2762 2763 list, err := api.ListObjects(baseParams, dstBck, nil, api.ListArgs{}) 2764 tassert.CheckFatal(t, err) 2765
tassert.Errorf(t, len(list.Entries) == expected, "expected %d to be copied, got %d", expected, len(list.Entries)) 2766 for _, e := range list.Entries { 2767 tassert.Fatalf(t, strings.HasPrefix(e.Name, m.prefix), "expected %q to have prefix %q", e.Name, m.prefix) 2768 } 2769 } 2770 2771 func testCopyBucketAbort(t *testing.T, srcBck cmn.Bck, m *ioContext, sleep time.Duration) { 2772 dstBck := cmn.Bck{Name: testBucketName + cos.GenTie(), Provider: apc.AIS} 2773 2774 xid, err := api.CopyBucket(baseParams, srcBck, dstBck, &apc.CopyBckMsg{Force: true}) 2775 tassert.CheckError(t, err) 2776 t.Cleanup(func() { 2777 tools.DestroyBucket(t, m.proxyURL, dstBck) 2778 }) 2779 2780 time.Sleep(sleep) 2781 2782 tlog.Logf("Aborting x-%s[%s]\n", apc.ActCopyBck, xid) 2783 err = api.AbortXaction(baseParams, &xact.ArgsMsg{ID: xid}) 2784 tassert.CheckError(t, err) 2785 2786 time.Sleep(time.Second) 2787 snaps, err := api.QueryXactionSnaps(baseParams, &xact.ArgsMsg{ID: xid}) 2788 tassert.CheckError(t, err) 2789 aborted, err := snaps.IsAborted(xid) 2790 tassert.CheckError(t, err) 2791 tassert.Errorf(t, aborted, "failed to abort copy-bucket: %q, %v", xid, err) 2792 2793 bcks, err := api.ListBuckets(baseParams, cmn.QueryBcks(dstBck), apc.FltExists) 2794 tassert.CheckError(t, err) 2795 tassert.Errorf(t, !tools.BucketsContain(bcks, cmn.QueryBcks(dstBck)), "should not contain destination bucket %s", dstBck) 2796 } 2797 2798 func testCopyBucketDryRun(t *testing.T, srcBck cmn.Bck, m *ioContext) { 2799 tools.CheckSkip(t, &tools.SkipTestArgs{Long: true}) 2800 dstBck := cmn.Bck{Name: "cpybck_dst" + cos.GenTie() + trand.String(5), Provider: apc.AIS} 2801 2802 xid, err := api.CopyBucket(baseParams, srcBck, dstBck, &apc.CopyBckMsg{DryRun: true}) 2803 tassert.CheckFatal(t, err) 2804 t.Cleanup(func() { 2805 tools.DestroyBucket(t, proxyURL, dstBck) 2806 }) 2807 2808 tlog.Logf("Waiting for x-%s[%s]\n", apc.ActCopyBck, xid) 2809 args := xact.ArgsMsg{ID: xid, Kind: apc.ActCopyBck, Timeout: time.Minute} 2810 _, err = api.WaitForXactionIC(baseParams, &args) 2811 tassert.CheckFatal(t, err) 2812 2813 snaps, err := api.QueryXactionSnaps(baseParams, &xact.ArgsMsg{ID: xid}) 2814 tassert.CheckFatal(t, err) 2815 2816 locObjs, outObjs, inObjs := snaps.ObjCounts(xid) 2817 tassert.Errorf(t, locObjs+outObjs == int64(m.num), "expected %d objects, got (locObjs=%d, outObjs=%d, inObjs=%d)", 2818 m.num, locObjs, outObjs, inObjs) 2819 2820 locBytes, outBytes, inBytes := snaps.ByteCounts(xid) 2821 expectedBytesCnt := int64(m.fileSize * uint64(m.num)) 2822 tassert.Errorf(t, locBytes+outBytes == expectedBytesCnt, "expected %d bytes, got (locBytes=%d, outBytes=%d, inBytes=%d)", 2823 expectedBytesCnt, locBytes, outBytes, inBytes) 2824 2825 exists, err := api.QueryBuckets(baseParams, cmn.QueryBcks(dstBck), apc.FltExists) 2826 tassert.CheckFatal(t, err) 2827 tassert.Errorf(t, !exists, "expected destination bucket to not be created") 2828 } 2829 2830 // Tries to rename and then copy bucket at the same time.
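// While the rename (apc.ActMoveBck) is still in progress, copy-bucket requests involving either
// the source or the rename destination are expected to fail; once the rename completes, only the
// destination bucket must remain, holding all of the source objects.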
2831 func TestRenameAndCopyBucket(t *testing.T) { 2832 var ( 2833 baseParams = tools.BaseAPIParams() 2834 src = cmn.Bck{Name: testBucketName + "_rc_src", Provider: apc.AIS} 2835 m = ioContext{t: t, bck: src, num: 500} 2836 dst1 = cmn.Bck{Name: testBucketName + "_rc_dst1", Provider: apc.AIS} 2837 dst2 = cmn.Bck{Name: testBucketName + "_rc_dst2", Provider: apc.AIS} 2838 ) 2839 tools.CheckSkip(t, &tools.SkipTestArgs{Long: true}) 2840 m.initAndSaveState(true /*cleanup*/) 2841 m.expectTargets(1) 2842 tools.DestroyBucket(t, m.proxyURL, dst1) 2843 2844 tools.CreateBucket(t, m.proxyURL, src, nil, true /*cleanup*/) 2845 defer func() { 2846 tools.DestroyBucket(t, m.proxyURL, dst1) 2847 tools.DestroyBucket(t, m.proxyURL, dst2) 2848 }() 2849 2850 m.puts() 2851 2852 // Rename as dst1 2853 tlog.Logf("Rename %s => %s\n", src, dst1) 2854 xid, err := api.RenameBucket(baseParams, src, dst1) 2855 if err != nil && ensurePrevRebalanceIsFinished(baseParams, err) { 2856 // retry just once 2857 xid, err = api.RenameBucket(baseParams, src, dst1) 2858 } 2859 tassert.CheckFatal(t, err) 2860 tlog.Logf("x-%s[%s] in progress...\n", apc.ActMoveBck, xid) 2861 2862 // Try to copy src to dst1 - and note that rename src => dst1 in progress 2863 tlog.Logf("Copy %s => %s (note: expecting to fail)\n", src, dst1) 2864 _, err = api.CopyBucket(baseParams, src, dst1, nil) 2865 tassert.Fatalf(t, err != nil, "expected copy %s => %s to fail", src, dst1) 2866 2867 // Try to copy bucket that is being renamed 2868 tlog.Logf("Copy %s => %s (note: expecting to fail)\n", src, dst2) 2869 _, err = api.CopyBucket(baseParams, src, dst2, nil) 2870 tassert.Fatalf(t, err != nil, "expected copy %s => %s to fail", src, dst2) 2871 2872 // Try to copy from dst1 (the rename destination) to dst2 2873 tlog.Logf("Copy %s => %s (note: expecting to fail)\n", dst1, dst2) 2874 _, err = api.CopyBucket(baseParams, dst1, dst2, nil) 2875 tassert.Fatalf(t, err != nil, "expected copy %s => %s to fail (as %s is the renaming destination)", dst1, dst2, dst1) 2876 2877 // Wait for rename to finish 2878 tlog.Logf("Waiting for x-%s[%s] to finish\n", apc.ActMoveBck, xid) 2879 time.Sleep(2 * time.Second) 2880 args := xact.ArgsMsg{ID: xid, Kind: apc.ActMoveBck, Timeout: tools.RebalanceTimeout} 2881 _, err = api.WaitForXactionIC(baseParams, &args) 2882 tassert.CheckFatal(t, err) 2883 2884 time.Sleep(time.Second) 2885 2886 // 2887 // more checks 2888 // 2889 tlog.Logln("Listing and counting") 2890 bcks, err := api.ListBuckets(baseParams, cmn.QueryBcks{Provider: apc.AIS}, apc.FltExists) 2891 tassert.CheckFatal(t, err) 2892 2893 tassert.Fatalf(t, !tools.BucketsContain(bcks, cmn.QueryBcks(src)), "expected %s to not exist (be renamed from), got %v", src, bcks) 2894 tassert.Fatalf(t, tools.BucketsContain(bcks, cmn.QueryBcks(dst1)), "expected %s to exist (be renamed to), got %v", dst1, bcks) 2895 tassert.Fatalf(t, !tools.BucketsContain(bcks, cmn.QueryBcks(dst2)), "expected %s to not exist (got %v)", dst2, bcks) 2896 2897 list, err := api.ListObjects(baseParams, dst1, nil, api.ListArgs{}) 2898 tassert.CheckFatal(t, err) 2899 tassert.Errorf(t, len(list.Entries) == m.num, "expected %s to have %d, got %d", dst1, m.num, len(list.Entries)) 2900 2901 m.bck = dst1 2902 m.gets(nil, false) 2903 m.ensureNoGetErrors() 2904 m.bck = src 2905 } 2906 2907 // Tries to copy and then rename bucket at the same time - similar to 2908 // `TestRenameAndCopyBucket` but in different order of operations. 2909
// TODO: This test should be enabled (not skipped) 2910 func TestCopyAndRenameBucket(t *testing.T) { 2911 t.Skip("fails - necessary checks are not yet implemented") 2912 2913 var ( 2914 m = ioContext{ 2915 t: t, 2916 num: 500, 2917 } 2918 baseParams = tools.BaseAPIParams() 2919 dstBck1 = cmn.Bck{ 2920 Name: testBucketName + "_new1", 2921 Provider: apc.AIS, 2922 } 2923 dstBck2 = cmn.Bck{ 2924 Name: testBucketName + "_new2", 2925 Provider: apc.AIS, 2926 } 2927 ) 2928 2929 m.initAndSaveState(true /*cleanup*/) 2930 m.expectTargets(1) 2931 2932 srcBck := m.bck 2933 tools.CreateBucket(t, m.proxyURL, srcBck, nil, true /*cleanup*/) 2934 defer func() { 2935 tools.DestroyBucket(t, m.proxyURL, dstBck1) 2936 tools.DestroyBucket(t, m.proxyURL, dstBck2) 2937 }() 2938 2939 m.puts() 2940 2941 // Copy to first destination 2942 tlog.Logf("copy %s => %s\n", srcBck, dstBck1) 2943 xid, err := api.CopyBucket(baseParams, srcBck, dstBck1, nil) 2944 tassert.CheckFatal(t, err) 2945 2946 // Try to rename to first destination - copy in progress, both for srcBck and dstBck1 2947 tlog.Logf("try rename %s => %s\n", srcBck, dstBck1) 2948 _, err = api.RenameBucket(baseParams, srcBck, dstBck1) 2949 if err == nil { 2950 t.Error("renaming a bucket that is being copied did not fail") 2951 } 2952 2953 // Try to rename to second destination - copy in progress for srcBck 2954 tlog.Logf("try rename %s => %s\n", srcBck, dstBck2) 2955 _, err = api.RenameBucket(baseParams, srcBck, dstBck2) 2956 if err == nil { 2957 t.Error("renaming a bucket that is being copied did not fail") 2958 } 2959 2960 // Try to rename from dstBck1 to dstBck2 - copy in progress for dstBck1 2961 tlog.Logf("try rename %s => %s\n", dstBck1, dstBck2) 2962 _, err = api.RenameBucket(baseParams, dstBck1, dstBck2) 2963 if err == nil { 2964 t.Error("renaming a bucket that is being copied did not fail") 2965 } 2966 2967 // Wait for copy to complete 2968 args := xact.ArgsMsg{ID: xid, Kind: apc.ActCopyBck, Timeout: tools.RebalanceTimeout} 2969 _, err = api.WaitForXactionIC(baseParams, &args) 2970 tassert.CheckFatal(t, err) 2971 2972 // Check if the new bucket appears in the list 2973 bcks, err := api.ListBuckets(baseParams, cmn.QueryBcks(srcBck), apc.FltExists) 2974 tassert.CheckFatal(t, err) 2975 2976 if tools.BucketsContain(bcks, cmn.QueryBcks(srcBck)) { 2977 t.Error("source bucket found in buckets list") 2978 } 2979 if !tools.BucketsContain(bcks, cmn.QueryBcks(dstBck1)) { 2980 t.Error("destination bucket not found in buckets list") 2981 } 2982 if tools.BucketsContain(bcks, cmn.QueryBcks(dstBck2)) { 2983 t.Error("second (failed) destination bucket found in buckets list") 2984 } 2985 } 2986 2987 func TestBackendBucket(t *testing.T) { 2988 var ( 2989 remoteBck = cliBck 2990 aisBck = cmn.Bck{ 2991 Name: trand.String(10), 2992 Provider: apc.AIS, 2993 } 2994 m = ioContext{ 2995 t: t, 2996 num: 10, 2997 bck: remoteBck, 2998 prefix: t.Name(), 2999 } 3000 3001 proxyURL = tools.RandomProxyURL(t) 3002 baseParams = tools.BaseAPIParams(proxyURL) 3003 ) 3004 3005 tools.CheckSkip(t, &tools.SkipTestArgs{CloudBck: true, Bck: remoteBck}) 3006 3007 m.init(true /*cleanup*/) 3008 3009 tools.CreateBucket(t, proxyURL, aisBck, nil, true /*cleanup*/) 3010 3011 p, err := api.HeadBucket(baseParams, remoteBck, true /* don't add */) 3012 tassert.CheckFatal(t, err) 3013 remoteBck.Provider = p.Provider 3014 3015 m.remotePuts(false /*evict*/) 3016 3017 msg := &apc.LsoMsg{Prefix: m.prefix} 3018 remoteObjList, err := api.ListObjects(baseParams, remoteBck, msg, api.ListArgs{}) 3019
tassert.CheckFatal(t, err) 3020 tassert.Fatalf(t, len(remoteObjList.Entries) > 0, "empty object list") 3021 3022 // Connect backend bucket to a aisBck 3023 _, err = api.SetBucketProps(baseParams, aisBck, &cmn.BpropsToSet{ 3024 BackendBck: &cmn.BackendBckToSet{ 3025 Name: apc.Ptr(remoteBck.Name), 3026 Provider: apc.Ptr(remoteBck.Provider), 3027 }, 3028 }) 3029 tassert.CheckFatal(t, err) 3030 // Try putting one of the original remote objects, it should work. 3031 err = tools.PutObjRR(baseParams, aisBck, remoteObjList.Entries[0].Name, 128, cos.ChecksumNone) 3032 tassert.Errorf(t, err == nil, "expected err==nil (put to a BackendBck should be allowed via aisBck)") 3033 3034 p, err = api.HeadBucket(baseParams, aisBck, true /* don't add */) 3035 tassert.CheckFatal(t, err) 3036 tassert.Fatalf( 3037 t, p.BackendBck.Equal(&remoteBck), 3038 "backend bucket wasn't set correctly (got: %s, expected: %s)", 3039 p.BackendBck, remoteBck, 3040 ) 3041 3042 // Try to cache object. 3043 cachedObjName := remoteObjList.Entries[0].Name 3044 _, err = api.GetObject(baseParams, aisBck, cachedObjName, nil) 3045 tassert.CheckFatal(t, err) 3046 3047 // Check if listing objects will result in listing backend bucket objects. 3048 msg.AddProps(apc.GetPropsAll...) 3049 aisObjList, err := api.ListObjects(baseParams, aisBck, msg, api.ListArgs{}) 3050 tassert.CheckFatal(t, err) 3051 tassert.Fatalf( 3052 t, len(remoteObjList.Entries) == len(aisObjList.Entries), 3053 "object lists remote vs ais does not match (got: %+v, expected: %+v)", 3054 aisObjList.Entries, remoteObjList.Entries, 3055 ) 3056 3057 // Check if cached listing works correctly. 3058 cacheMsg := &apc.LsoMsg{Flags: apc.LsObjCached, Prefix: m.prefix} 3059 aisObjList, err = api.ListObjects(baseParams, aisBck, cacheMsg, api.ListArgs{}) 3060 tassert.CheckFatal(t, err) 3061 tassert.Fatalf( 3062 t, len(aisObjList.Entries) == 1, 3063 "bucket contains incorrect number of cached objects (got: %+v, expected: [%s])", 3064 aisObjList.Entries, cachedObjName, 3065 ) 3066 3067 // Disallow PUT (TODO: use apc.AceObjUpdate instead, when/if supported) 3068 3069 aattrs := apc.AccessAll &^ apc.AcePUT 3070 _, err = api.SetBucketProps(baseParams, aisBck, &cmn.BpropsToSet{ 3071 BackendBck: &cmn.BackendBckToSet{ 3072 Name: apc.Ptr(""), 3073 Provider: apc.Ptr(""), 3074 }, 3075 Access: apc.Ptr(aattrs), 3076 }) 3077 tassert.CheckFatal(t, err) 3078 p, err = api.HeadBucket(baseParams, aisBck, true /* don't add */) 3079 tassert.CheckFatal(t, err) 3080 tassert.Fatalf(t, p.BackendBck.IsEmpty(), "backend bucket is still configured: %s", p.BackendBck.String()) 3081 3082 // Check that we can still GET and list-objects 3083 _, err = api.GetObject(baseParams, aisBck, cachedObjName, nil) 3084 tassert.CheckFatal(t, err) 3085 3086 aisObjList, err = api.ListObjects(baseParams, aisBck, msg, api.ListArgs{}) 3087 tassert.CheckFatal(t, err) 3088 tassert.Fatalf( 3089 t, len(aisObjList.Entries) == 1, 3090 "bucket contains incorrect number of objects (got: %+v, expected: [%s])", 3091 aisObjList.Entries, cachedObjName, 3092 ) 3093 3094 // Check that we cannot cold GET anymore - no backend 3095 tlog.Logln("Trying to cold-GET when there's no backend anymore (expecting to fail)") 3096 _, err = api.GetObject(baseParams, aisBck, remoteObjList.Entries[1].Name, nil) 3097 tassert.Fatalf(t, err != nil, "expected error (object should not exist)") 3098 3099 // Check that we cannot do PUT anymore. 
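// (PUT access was explicitly removed above via `aattrs := apc.AccessAll &^ apc.AcePUT`.)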
3100 tlog.Logln("Trying to PUT 2nd version (expecting to fail)") 3101 err = tools.PutObjRR(baseParams, aisBck, cachedObjName, 256, cos.ChecksumNone) 3102 tassert.Errorf(t, err != nil, "expected err != nil") 3103 } 3104 3105 // 3106 // even more checksum tests 3107 // 3108 3109 func TestAllChecksums(t *testing.T) { 3110 checksums := cos.SupportedChecksums() 3111 for _, mirrored := range []bool{false, true} { 3112 for _, cksumType := range checksums { 3113 if testing.Short() && cksumType != cos.ChecksumNone && cksumType != cos.ChecksumXXHash { 3114 continue 3115 } 3116 tag := cksumType 3117 if mirrored { 3118 tag = cksumType + "/mirrored" 3119 } 3120 t.Run(tag, func(t *testing.T) { 3121 started := time.Now() 3122 testWarmValidation(t, cksumType, mirrored, false) 3123 tlog.Logf("Time: %v\n", time.Since(started)) 3124 }) 3125 } 3126 } 3127 3128 for _, cksumType := range checksums { 3129 if testing.Short() && cksumType != cos.ChecksumNone && cksumType != cos.ChecksumXXHash { 3130 continue 3131 } 3132 tag := cksumType + "/EC" 3133 t.Run(tag, func(t *testing.T) { 3134 tools.CheckSkip(t, &tools.SkipTestArgs{MinTargets: 4}) 3135 3136 started := time.Now() 3137 testWarmValidation(t, cksumType, false, true) 3138 tlog.Logf("Time: %v\n", time.Since(started)) 3139 }) 3140 } 3141 } 3142 3143 func testWarmValidation(t *testing.T, cksumType string, mirrored, eced bool) { 3144 const ( 3145 copyCnt = 2 3146 parityCnt = 2 3147 xactTimeout = 10 * time.Second 3148 ) 3149 var ( 3150 m = ioContext{ 3151 t: t, 3152 num: 1000, 3153 numGetsEachFile: 1, 3154 fileSize: uint64(cos.KiB + rand.Int63n(cos.KiB*10)), 3155 } 3156 numCorrupted = rand.Intn(m.num/100) + 2 3157 ) 3158 if testing.Short() { 3159 m.num = 40 3160 m.fileSize = cos.KiB 3161 numCorrupted = 13 3162 } 3163 3164 m.initAndSaveState(true /*cleanup*/) 3165 baseParams := tools.BaseAPIParams(m.proxyURL) 3166 tools.CreateBucket(t, m.proxyURL, m.bck, nil, true /*cleanup*/) 3167 3168 { 3169 if mirrored { 3170 _, err := api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{ 3171 Cksum: &cmn.CksumConfToSet{ 3172 Type: apc.Ptr(cksumType), 3173 ValidateWarmGet: apc.Ptr(true), 3174 }, 3175 Mirror: &cmn.MirrorConfToSet{ 3176 Enabled: apc.Ptr(true), 3177 Copies: apc.Ptr[int64](copyCnt), 3178 }, 3179 }) 3180 tassert.CheckFatal(t, err) 3181 } else if eced { 3182 if m.smap.CountActiveTs() < parityCnt+1 { 3183 t.Fatalf("Not enough targets to run %s test, must be at least %d", t.Name(), parityCnt+1) 3184 } 3185 _, err := api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{ 3186 Cksum: &cmn.CksumConfToSet{ 3187 Type: apc.Ptr(cksumType), 3188 ValidateWarmGet: apc.Ptr(true), 3189 }, 3190 EC: &cmn.ECConfToSet{ 3191 Enabled: apc.Ptr(true), 3192 ObjSizeLimit: apc.Ptr[int64](cos.GiB), // only slices 3193 DataSlices: apc.Ptr(1), 3194 ParitySlices: apc.Ptr(parityCnt), 3195 }, 3196 }) 3197 tassert.CheckFatal(t, err) 3198 } else { 3199 _, err := api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{ 3200 Cksum: &cmn.CksumConfToSet{ 3201 Type: apc.Ptr(cksumType), 3202 ValidateWarmGet: apc.Ptr(true), 3203 }, 3204 }) 3205 tassert.CheckFatal(t, err) 3206 } 3207 3208 p, err := api.HeadBucket(baseParams, m.bck, true /* don't add */) 3209 tassert.CheckFatal(t, err) 3210 if p.Cksum.Type != cksumType { 3211 t.Fatalf("failed to set checksum: %q != %q", p.Cksum.Type, cksumType) 3212 } 3213 if !p.Cksum.ValidateWarmGet { 3214 t.Fatal("failed to set checksum: validate_warm_get not enabled") 3215 } 3216 if mirrored && !p.Mirror.Enabled { 3217 t.Fatal("failed to mirroring") 3218 } 3219 if eced && 
!p.EC.Enabled { 3220 t.Fatal("failed to enable erasure coding") 3221 } 3222 } 3223 3224 m.puts() 3225 3226 // wait for mirroring 3227 if mirrored { 3228 args := xact.ArgsMsg{Kind: apc.ActPutCopies, Bck: m.bck, Timeout: xactTimeout} 3229 api.WaitForXactionIdle(baseParams, &args) 3230 // NOTE: ref 1377 3231 m.ensureNumCopies(baseParams, copyCnt, false /*greaterOk*/) 3232 } 3233 // wait for erasure-coding 3234 if eced { 3235 args := xact.ArgsMsg{Kind: apc.ActECPut, Bck: m.bck, Timeout: xactTimeout} 3236 api.WaitForXactionIdle(baseParams, &args) 3237 } 3238 3239 // read all 3240 if cksumType != cos.ChecksumNone { 3241 tlog.Logf("Reading %q objects with checksum validation\n", m.bck) 3242 } else { 3243 tlog.Logf("Reading %q objects\n", m.bck) 3244 } 3245 m.gets(nil, false) 3246 3247 msg := &apc.LsoMsg{} 3248 bckObjs, err := api.ListObjects(baseParams, m.bck, msg, api.ListArgs{}) 3249 tassert.CheckFatal(t, err) 3250 if len(bckObjs.Entries) == 0 { 3251 t.Errorf("%s is empty\n", m.bck) 3252 return 3253 } 3254 3255 if cksumType != cos.ChecksumNone { 3256 tlog.Logf("Reading %d objects from %s with end-to-end %s validation\n", len(bckObjs.Entries), m.bck, cksumType) 3257 wg := cos.NewLimitedWaitGroup(20, 0) 3258 3259 for _, en := range bckObjs.Entries { 3260 wg.Add(1) 3261 go func(name string) { 3262 defer wg.Done() 3263 _, err = api.GetObjectWithValidation(baseParams, m.bck, name, nil) 3264 tassert.CheckError(t, err) 3265 }(en.Name) 3266 } 3267 3268 wg.Wait() 3269 } 3270 3271 if docker.IsRunning() { 3272 tlog.Logf("skipping %s object corruption (docker is not supported)\n", t.Name()) 3273 return 3274 } 3275 3276 initMountpaths(t, proxyURL) 3277 // corrupt random and read again 3278 { 3279 i := rand.Intn(len(bckObjs.Entries)) 3280 if i+numCorrupted > len(bckObjs.Entries) { 3281 i -= numCorrupted 3282 } 3283 objCh := make(chan string, numCorrupted) 3284 tlog.Logf("Corrupting %d objects\n", numCorrupted) 3285 go func() { 3286 for j := i; j < i+numCorrupted; j++ { 3287 objName := bckObjs.Entries[j].Name 3288 corruptSingleBitInFile(t, m.bck, objName) 3289 objCh <- objName 3290 } 3291 }() 3292 for range numCorrupted { 3293 objName := <-objCh 3294 _, err = api.GetObject(baseParams, m.bck, objName, nil) 3295 if mirrored || eced { 3296 if err != nil && cksumType != cos.ChecksumNone { 3297 if eced { 3298 // retry EC 3299 time.Sleep(2 * time.Second) 3300 _, err = api.GetObject(baseParams, m.bck, objName, nil) 3301 } 3302 if err != nil { 3303 t.Errorf("%s corruption detected but not resolved, mirror=%t, ec=%t\n", 3304 m.bck.Cname(objName), mirrored, eced) 3305 } 3306 } 3307 } else { 3308 if err == nil && cksumType != cos.ChecksumNone { 3309 t.Errorf("%s corruption undetected\n", m.bck.Cname(objName)) 3310 } 3311 } 3312 } 3313 } 3314 } 3315 3316 func TestBucketListAndSummary(t *testing.T) { 3317 tools.CheckSkip(t, &tools.SkipTestArgs{Long: true}) 3318 3319 type test struct { 3320 provider string 3321 summary bool 3322 cached bool 3323 } 3324 3325 providers := []string{apc.AIS} 3326 if cliBck.IsRemote() { 3327 providers = append(providers, cliBck.Provider) 3328 } 3329 3330 var tests []test 3331 for _, provider := range providers { 3332 for _, summary := range []bool{false, true} { 3333 for _, cached := range []bool{false, true} { 3334 tests = append(tests, test{ 3335 provider: provider, 3336 summary: summary, 3337 cached: cached, 3338 }) 3339 } 3340 } 3341 } 3342 3343 for _, test := range tests { 3344 p := make([]string, 3) 3345 p[0] = test.provider 3346 p[1] = "list" 3347 if test.summary { 3348 p[1] = "summary" 
3349 } 3350 p[2] = "all" 3351 if test.cached { 3352 p[2] = "cached" 3353 } 3354 t.Run(strings.Join(p, "/"), func(t *testing.T) { 3355 var ( 3356 m = &ioContext{ 3357 t: t, 3358 bck: cmn.Bck{ 3359 Name: trand.String(10), 3360 Provider: test.provider, 3361 }, 3362 3363 num: 12345, 3364 } 3365 baseParams = tools.BaseAPIParams() 3366 3367 expectedFiles = m.num 3368 cacheSize int 3369 ) 3370 3371 m.initAndSaveState(true /*cleanup*/) 3372 m.expectTargets(1) 3373 3374 if m.bck.IsAIS() { 3375 tools.CreateBucket(t, m.proxyURL, m.bck, nil, true /*cleanup*/) 3376 m.puts() 3377 } else if m.bck.IsRemote() { 3378 m.bck = cliBck 3379 tools.CheckSkip(t, &tools.SkipTestArgs{RemoteBck: true, Bck: m.bck}) 3380 tlog.Logf("remote %s\n", m.bck.Cname("")) 3381 m.del(-1 /* delete all */) 3382 3383 m.num /= 10 3384 cacheSize = m.num / 2 3385 expectedFiles = m.num 3386 3387 m.remotePuts(true /*evict*/) 3388 if test.cached { 3389 m.remotePrefetch(cacheSize) 3390 expectedFiles = cacheSize 3391 } 3392 } else { 3393 t.Fatal(test.provider) 3394 } 3395 3396 tlog.Logln("checking objects...") 3397 3398 if test.summary { 3399 msg := &apc.BsummCtrlMsg{ObjCached: test.cached} 3400 xid, summaries, err := api.GetBucketSummary(baseParams, cmn.QueryBcks(m.bck), msg, api.BsummArgs{}) 3401 tassert.CheckFatal(t, err) 3402 3403 if len(summaries) == 0 { 3404 t.Fatalf("x-%s[%s] summary for bucket %q should exist", apc.ActSummaryBck, xid, m.bck) 3405 } 3406 if len(summaries) != 1 { 3407 t.Fatalf("x-%s[%s] number of summaries (%d) is larger than 1", apc.ActSummaryBck, xid, len(summaries)) 3408 } 3409 3410 summary := summaries[0] 3411 if summary.ObjCount.Remote+summary.ObjCount.Present != uint64(expectedFiles) { 3412 t.Errorf("x-%s[%s] %s: number of objects in summary (%+v) differs from expected (%d)", 3413 apc.ActSummaryBck, xid, m.bck, summary.ObjCount, expectedFiles) 3414 } 3415 } else { 3416 msg := &apc.LsoMsg{PageSize: int64(min(m.num/3, 256))} // mult. 
pages 3417 if test.cached { 3418 msg.Flags = apc.LsObjCached 3419 } 3420 objList, err := api.ListObjects(baseParams, m.bck, msg, api.ListArgs{}) 3421 tassert.CheckFatal(t, err) 3422 3423 if len(objList.Entries) != expectedFiles { 3424 t.Errorf("number of listed objects (%d) is different from expected (%d)", 3425 len(objList.Entries), expectedFiles) 3426 } 3427 } 3428 }) 3429 } 3430 } 3431 3432 func TestListObjectsNoRecursion(t *testing.T) { 3433 type test struct { 3434 prefix string 3435 count int 3436 } 3437 var ( 3438 bck = cmn.Bck{ 3439 Name: t.Name() + "Bucket", 3440 Provider: apc.AIS, 3441 } 3442 proxyURL = tools.RandomProxyURL(t) 3443 baseParams = tools.BaseAPIParams(proxyURL) 3444 objs = []string{ 3445 "img001", "vid001", 3446 "img-test/obj1", "img-test/vid1", "img-test/pics/obj01", 3447 "img003", "img-test/pics/vid01"} 3448 tests = []test{ 3449 {prefix: "", count: 4}, 3450 {prefix: "img-test", count: 3}, 3451 {prefix: "img-test/", count: 3}, 3452 {prefix: "img-test/pics", count: 3}, 3453 {prefix: "img-test/pics/", count: 3}, 3454 } 3455 ) 3456 3457 tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/) 3458 for _, nm := range objs { 3459 objectSize := int64(rand.Intn(256) + 20) 3460 reader, _ := readers.NewRand(objectSize, cos.ChecksumNone) 3461 _, err := api.PutObject(&api.PutArgs{ 3462 BaseParams: baseParams, 3463 Bck: bck, 3464 ObjName: nm, 3465 Reader: reader, 3466 }) 3467 tassert.CheckFatal(t, err) 3468 } 3469 3470 msg := &apc.LsoMsg{Props: apc.GetPropsName} 3471 lst, err := api.ListObjects(baseParams, bck, msg, api.ListArgs{}) 3472 tassert.CheckFatal(t, err) 3473 tassert.Fatalf(t, len(lst.Entries) == len(objs), "Invalid number of objects %d vs %d", len(lst.Entries), len(objs)) 3474 3475 for idx, tst := range tests { 3476 msg := &apc.LsoMsg{Flags: apc.LsNoRecursion | apc.LsNameSize, Prefix: tst.prefix} 3477 lst, err := api.ListObjects(baseParams, bck, msg, api.ListArgs{}) 3478 tassert.CheckFatal(t, err) 3479 3480 if tst.count == len(lst.Entries) { 3481 continue 3482 } 3483 tlog.Logf("Failed test #%d (prefix %s). Expected %d, got %d\n", 3484 idx, tst.prefix, tst.count, len(lst.Entries)) 3485 for idx, en := range lst.Entries { 3486 tlog.Logf("%d. %s (%v)\n", idx, en.Name, en.Flags) 3487 } 3488 tassert.Errorf(t, false, "[%s] Invalid number of objects %d (expected %d)", tst.prefix, len(lst.Entries), tst.count) 3489 } 3490 }
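// NOTE: the helper below is an illustrative, unused sketch rather than part of the test suite:
// it distills the start-xaction-and-wait pattern that most bucket-level tests in this file
// follow (CopyBucket is used as the example; the bucket arguments are placeholders).
func exampleCopyBucketAndWait(t *testing.T, baseParams api.BaseParams, srcBck, dstBck cmn.Bck) {
	// kick off an asynchronous bucket copy; the returned ID identifies the x-copy-bck job
	xid, err := api.CopyBucket(baseParams, srcBck, dstBck, &apc.CopyBckMsg{})
	tassert.CheckFatal(t, err)

	// block until the xaction is reported finished
	args := xact.ArgsMsg{ID: xid, Kind: apc.ActCopyBck, Timeout: tools.CopyBucketTimeout}
	_, err = api.WaitForXactionIC(baseParams, &args)
	tassert.CheckFatal(t, err)
}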