github.com/NVIDIA/aistore@v1.3.23-0.20240517131212-7df6609be51d/ais/test/objprops_test.go

// Package integration_test.
/*
 * Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved.
 */
package integration_test

import (
	"fmt"
	"strconv"
	"sync"
	"testing"

	"github.com/NVIDIA/aistore/api"
	"github.com/NVIDIA/aistore/api/apc"
	"github.com/NVIDIA/aistore/cmn"
	"github.com/NVIDIA/aistore/cmn/cos"
	"github.com/NVIDIA/aistore/core/meta"
	"github.com/NVIDIA/aistore/stats"
	"github.com/NVIDIA/aistore/tools"
	"github.com/NVIDIA/aistore/tools/readers"
	"github.com/NVIDIA/aistore/tools/tassert"
	"github.com/NVIDIA/aistore/tools/tlog"
	"github.com/NVIDIA/aistore/xact"
)

// propsStats sums the per-target counters for changed object versions (count and bytes).
func propsStats(t *testing.T, proxyURL string) (objChanged, bytesChanged int64) {
	cstats := tools.GetClusterStats(t, proxyURL)
	objChanged = 0
	bytesChanged = 0

	for _, v := range cstats.Target {
		objChanged += tools.GetNamedStatsVal(v, stats.VerChangeCount)
		bytesChanged += tools.GetNamedStatsVal(v, stats.VerChangeSize)
	}
	return
}

// propsUpdateObjects overwrites every object listed in oldVersions and returns
// the new versions as reported by list-objects.
func propsUpdateObjects(t *testing.T, proxyURL string, bck cmn.Bck, oldVersions map[string]string,
	msg *apc.LsoMsg, versionEnabled bool, cksumType string) (newVersions map[string]string) {
	newVersions = make(map[string]string, len(oldVersions))
	tlog.Logln("Updating...")
	r, err := readers.NewRand(int64(fileSize), cksumType)
	if err != nil {
		t.Fatalf("Failed to create reader: %v", err)
	}
	baseParams := tools.BaseAPIParams(proxyURL)
	for fname := range oldVersions {
		putArgs := api.PutArgs{
			BaseParams: baseParams,
			Bck:        bck,
			ObjName:    fname,
			Cksum:      r.Cksum(),
			Reader:     r,
		}
		_, err = api.PutObject(&putArgs)
		if err != nil {
			t.Errorf("Failed to PUT new data to object %s: %v", bck.Cname(fname), err)
		}
	}

	reslist := testListObjects(t, proxyURL, bck, msg)
	tassert.Errorf(t, len(oldVersions) == len(reslist.Entries), "len(oldVersions) %d != %d len(reslist.Entries)",
		len(oldVersions), len(reslist.Entries))

	var (
		ver string
		ok  bool
	)
	for _, m := range reslist.Entries {
		if ver, ok = oldVersions[m.Name]; !ok {
			continue
		}
		newVersions[m.Name] = m.Version

		if !m.IsPresent() && bck.IsRemote() {
			t.Errorf("%s: not marked as cached", bck.Cname(m.Name))
		}
		if !versionEnabled {
			continue
		}
		if ver == m.Version {
			t.Fatalf("%s: version was expected to update", bck.Cname(m.Name))
		} else if m.Version == "" {
			t.Fatalf("%s: version is empty", bck.Cname(m.Name))
		}
	}
	tlog.Logf("All %d object versions updated\n", len(reslist.Entries))

	return
}

// propsReadObjects GETs every object and verifies that all reads were served
// from the cluster, i.e., that the version-change counters did not move (no cold GETs).
func propsReadObjects(t *testing.T, proxyURL string, bck cmn.Bck, objList map[string]string) {
	versChanged, bytesChanged := propsStats(t, proxyURL)
	baseParams := tools.BaseAPIParams(proxyURL)
	for objName := range objList {
		_, err := api.GetObject(baseParams, bck, objName, nil)
		if err != nil {
			t.Errorf("Failed to GET %s: %v", bck.Cname(objName), err)
			continue
		}
	}
	versChangedFinal, bytesChangedFinal := propsStats(t, proxyURL)
	if versChangedFinal-versChanged > 0 {
		tlog.Logf("Versions changed: %d (%s)\n", versChangedFinal-versChanged, cos.ToSizeIEC(bytesChangedFinal-bytesChanged, 1))
	}
	if versChanged != versChangedFinal || bytesChanged != bytesChangedFinal {
		t.Fatalf("All objects must be retrieved from the cache but cold GET happened: %d times (%d bytes)",
			versChangedFinal-versChanged,
			bytesChangedFinal-bytesChanged)
	}
}

// propsEvict evicts roughly a third of the objects and verifies that the evicted
// entries lose their atime and cached status while keeping the same version.
func propsEvict(t *testing.T, proxyURL string, bck cmn.Bck, objMap map[string]string, msg *apc.LsoMsg, versionEnabled bool) {
	// generate object list to evict 1/3rd of all objects - random selection
	toEvict := len(objMap) / 3
	if toEvict == 0 {
		toEvict = 1
	}
	toEvictList := make([]string, 0, toEvict)
	evictMap := make(map[string]bool, toEvict)
	tlog.Logf("Evicting %v objects:\n", toEvict)

	for fname := range objMap {
		evictMap[fname] = true
		toEvictList = append(toEvictList, fname)
		tlog.Logf(" %s\n", bck.Cname(fname))
		if len(toEvictList) >= toEvict {
			break
		}
	}

	baseParams := tools.BaseAPIParams(proxyURL)
	xid, err := api.EvictMultiObj(baseParams, bck, toEvictList, "" /*template*/)
	if err != nil {
		t.Errorf("Failed to evict objects: %v\n", err)
	}
	args := xact.ArgsMsg{ID: xid, Kind: apc.ActEvictObjects, Timeout: tools.RebalanceTimeout}
	_, err = api.WaitForXactionIC(baseParams, &args)
	tassert.CheckFatal(t, err)

	tlog.Logf("Reading object list...\n")

	// read a new object list and check that evicted objects do not have atime and cached==false
	// version must be the same
	reslist := testListObjects(t, proxyURL, bck, msg)
	if reslist == nil {
		return
	}

	for _, m := range reslist.Entries {
		oldVersion, ok := objMap[m.Name]
		if !ok {
			continue
		}
		tlog.Logf("%s [%d] - cached: [%v], atime [%v]\n", bck.Cname(m.Name), m.Flags, m.IsPresent(), m.Atime)

		// e.g. misplaced replica
		if !m.IsStatusOK() {
			continue
		}

		if _, wasEvicted := evictMap[m.Name]; wasEvicted {
			if m.Atime != "" {
				t.Errorf("Evicted %s still has atime %q", bck.Cname(m.Name), m.Atime)
			}
			if m.IsPresent() {
				t.Errorf("Evicted %s is still marked as _cached_", bck.Cname(m.Name))
			}
		}

		if !versionEnabled {
			continue
		}

		if m.Version == "" {
			t.Errorf("%s: version is empty", bck.Cname(m.Name))
		} else if m.Version != oldVersion {
			t.Errorf("%s: version has changed from %s to %s", bck.Cname(m.Name), oldVersion, m.Version)
		}
	}
}

// propsRecacheObjects re-reads the objects to warm the cache and verifies that
// they are cached again, have an access time, and keep their versions.
func propsRecacheObjects(t *testing.T, proxyURL string, bck cmn.Bck, objs map[string]string, msg *apc.LsoMsg, versionEnabled bool) {
	tlog.Logf("Reading...\n")
	propsReadObjects(t, proxyURL, bck, objs)

	tlog.Logf("Listing objects...\n")
	reslist := testListObjects(t, proxyURL, bck, msg)
	tassert.Fatalf(t, reslist != nil && len(reslist.Entries) > 0, "Unexpected: no objects in the bucket %s", bck)

	var (
		version string
		ok      bool
	)
	tlog.Logf("Checking object properties...\n")
	for _, m := range reslist.Entries {
		if version, ok = objs[m.Name]; !ok {
			continue
		}
		if !m.IsPresent() {
			t.Errorf("%s: not marked as cached", bck.Cname(m.Name))
		}
		if m.Atime == "" {
			t.Errorf("%s: access time is empty", bck.Cname(m.Name))
		}
		if !versionEnabled {
			continue
		}
		if m.Version == "" {
			t.Errorf("Failed to read %s version", bck.Cname(m.Name))
		} else if version != m.Version {
			t.Errorf("%s versions mismatch: expected [%s], have [%s]", bck.Cname(m.Name), version, m.Version)
		}
	}
}

// propsRebalance puts a random target into maintenance, rewrites the objects,
// rejoins the target, waits for rebalance, and verifies that versions survive.
func propsRebalance(t *testing.T, proxyURL string, bck cmn.Bck, objects map[string]string, msg *apc.LsoMsg,
	versionEnabled bool, cksumType string) {
	baseParams := tools.BaseAPIParams(proxyURL)
	propsCleanupObjects(t, proxyURL, bck, objects)

	smap := tools.GetClusterMap(t, proxyURL)
	origActiveTargetCnt := smap.CountActiveTs()
	if origActiveTargetCnt < 2 {
		t.Skipf("Only %d targets found, need at least 2", origActiveTargetCnt)
	}

	removeTarget, _ := smap.GetRandTarget()

	args := &apc.ActValRmNode{DaemonID: removeTarget.ID(), SkipRebalance: true}
	_, err := api.StartMaintenance(baseParams, args)
	tassert.CheckFatal(t, err)
	smap, err = tools.WaitForClusterState(
		proxyURL,
		"target removed",
		smap.Version,
		smap.CountActivePs(),
		smap.CountActiveTs()-1,
	)
	tassert.CheckFatal(t, err)

	// rewrite objects and compare versions - they should change
	newobjs := propsUpdateObjects(t, proxyURL, bck, objects, msg, versionEnabled, cksumType)

	args = &apc.ActValRmNode{DaemonID: removeTarget.ID()}
	rebID, err := api.StopMaintenance(baseParams, args)
	tassert.CheckFatal(t, err)
	_, err = tools.WaitForClusterState(
		proxyURL,
		"target joined",
		smap.Version,
		smap.CountActivePs(),
		smap.CountActiveTs()+1,
	)
	tassert.CheckFatal(t, err)
	tools.WaitForRebalanceByID(t, baseParams, rebID)

	tlog.Logf("Listing objects...\n")
	reslist := testListObjects(t, proxyURL, bck, msg)
	tassert.Fatalf(t, reslist != nil && len(reslist.Entries) > 0, "Unexpected: no objects in the bucket %s", bck)

	var (
		version  string
		ok       bool
		objFound int
	)
	tlog.Logf("Checking object properties...\n")
	for _, m := range reslist.Entries {
		if version, ok = newobjs[m.Name]; !ok {
			continue
		}
		if !m.IsStatusOK() {
			continue
		}
		objFound++
		if !m.IsPresent() && bck.IsRemote() {
			t.Errorf("%s: not marked as cached", bck.Cname(m.Name))
		}
		if m.Atime == "" {
			t.Errorf("%s: access time is empty", bck.Cname(m.Name))
		}
		if !versionEnabled {
			continue
		}
		if version != m.Version {
			t.Errorf("%s post-rebalance version mismatch: have [%s], expected [%s]", bck.Cname(m.Name), m.Version, version)
		}
	}

	if objFound != len(objects) {
		t.Errorf("Wrong number of objects after rebalance: have %d, expected %d", objFound, len(objects))
	}
}

// propsCleanupObjects deletes the given objects concurrently and reports any errors.
func propsCleanupObjects(t *testing.T, proxyURL string, bck cmn.Bck, newVersions map[string]string) {
	errCh := make(chan error, 100)
	wg := &sync.WaitGroup{}
	for objName := range newVersions {
		wg.Add(1)
		go tools.Del(proxyURL, bck, objName, wg, errCh, true /*silent*/)
	}
	wg.Wait()
	tassert.SelectErr(t, errCh, "delete", true)
	close(errCh)
}

func TestObjPropsVersion(t *testing.T) {
	tools.CheckSkip(t, &tools.SkipTestArgs{Long: true})

	for _, versioning := range []bool{false, true} {
		t.Run(fmt.Sprintf("enabled=%t", versioning), func(t *testing.T) {
			propsVersionAllProviders(t, versioning)
		})
	}
}

// propsVersionAllProviders runs propsVersion for every provider, temporarily
// aligning the cluster-wide versioning and validate-warm-get settings with the
// requested mode and restoring them afterwards.
func propsVersionAllProviders(t *testing.T, versioning bool) {
	runProviderTests(t, func(t *testing.T, bck *meta.Bck) {
		config := tools.GetClusterConfig(t)

		oldChkVersion := config.Versioning.ValidateWarmGet
		oldVersioning := config.Versioning.Enabled

		newConfig := make(cos.StrKVs)
		if oldVersioning != versioning {
			newConfig[cmn.PropBucketVerEnabled] = strconv.FormatBool(versioning)
		}
		warmCheck := versioning
		if oldChkVersion != warmCheck {
			newConfig["versioning.validate_warm_get"] = strconv.FormatBool(warmCheck)
		}
		if len(newConfig) != 0 {
			tools.SetClusterConfig(t, newConfig)
		}

		defer func() {
			// restore configuration
			newConfig := make(cos.StrKVs)
			oldWarmCheck := oldChkVersion && oldVersioning
			if oldWarmCheck != warmCheck {
				newConfig["versioning.validate_warm_get"] = strconv.FormatBool(oldWarmCheck)
			}
			if oldVersioning != versioning {
				newConfig[cmn.PropBucketVerEnabled] = strconv.FormatBool(oldVersioning)
			}
			if len(newConfig) != 0 {
				tools.SetClusterConfig(t, newConfig)
			}
		}()

		propsVersion(t, bck.Clone(), bck.Props.Versioning.Enabled, bck.Props.Cksum.Type)
	})
}

// propsVersion PUTs objects, rewrites them, evicts and re-caches them (cloud buckets only),
// and rebalances the cluster - verifying versions, atime, and cached status at each step.
func propsVersion(t *testing.T, bck cmn.Bck, versionEnabled bool, cksumType string) {
	var (
		m = ioContext{
			t:                   t,
			bck:                 bck,
			num:                 15,
			fileSize:            cos.KiB,
			prefix:              "props/obj-",
			deleteRemoteBckObjs: true,
		}
		proxyURL = tools.RandomProxyURL()
	)

	m.init(true /*cleanup*/)
	if m.bck.IsRemote() {
		m.del(-1 /* delete all */)
	}
	m.puts()
	// Read object versions.
	msg := &apc.LsoMsg{}
	msg.AddProps(apc.GetPropsVersion, apc.GetPropsAtime, apc.GetPropsStatus)
	reslist := testListObjects(t, proxyURL, bck, msg)
	if reslist == nil {
		t.Fatalf("Unexpected error: no objects in the bucket %s", bck)
		return
	}

	// PUT objects must have all properties set: atime, cached, version
	filesList := make(map[string]string)
	for _, m := range reslist.Entries {
		tlog.Logf("%s initial version:\t%q\n", bck.Cname(m.Name), m.Version)

		if !m.IsPresent() && bck.IsRemote() {
			t.Errorf("%s: not marked as _cached_", bck.Cname(m.Name))
		}
		if m.Atime == "" {
			t.Errorf("%s: access time is empty", bck.Cname(m.Name))
		}
		filesList[m.Name] = m.Version
		if !versionEnabled {
			continue
		}
		if m.Version == "" {
			t.Fatalf("Failed to read %s version", bck.Cname(m.Name))
		}
	}

	// rewrite objects and compare versions - they should change
	newVersions := propsUpdateObjects(t, proxyURL, bck, filesList, msg, versionEnabled, cksumType)

	// check that files are read from cache
	propsReadObjects(t, proxyURL, bck, filesList)

	// TODO: this should work for the remote cluster as well
	if bck.IsCloud() {
		// try to evict some files and check if they are gone
		propsEvict(t, proxyURL, bck, newVersions, msg, versionEnabled)

		// read objects to put them to the cache. After that all objects must have cached=true
		propsRecacheObjects(t, proxyURL, bck, newVersions, msg, versionEnabled)
	}

	// test rebalance should keep object versions
	propsRebalance(t, proxyURL, bck, newVersions, msg, versionEnabled, cksumType)

	// cleanup
	propsCleanupObjects(t, proxyURL, bck, newVersions)
}

func TestObjProps(t *testing.T) {
	const (
		typeLocal     = "local"
		typeRemoteAIS = "remoteAIS"
		typeCloud     = "cloud"
	)
	var (
		proxyURL   = tools.RandomProxyURL()
		baseParams = tools.BaseAPIParams(proxyURL)

		tests = []struct {
			bucketType   string
			checkPresent bool
			verEnabled   bool
			evict        bool
		}{
			{checkPresent: true, bucketType: typeLocal},
			{checkPresent: true, bucketType: typeCloud, evict: false},
			{checkPresent: true, bucketType: typeCloud, evict: true},

			{checkPresent: false, verEnabled: false, bucketType: typeLocal},
			{checkPresent: false, verEnabled: true, bucketType: typeLocal},

			{checkPresent: false, verEnabled: false, bucketType: typeRemoteAIS, evict: false},
			{checkPresent: false, verEnabled: false, bucketType: typeRemoteAIS, evict: true},

			{checkPresent: false, verEnabled: false, bucketType: typeLocal},
			{checkPresent: false, verEnabled: true, bucketType: typeLocal},

			{checkPresent: false, verEnabled: false, bucketType: typeCloud, evict: false},
			{checkPresent: false, verEnabled: false, bucketType: typeCloud, evict: true},
			// valid only if the cloud bucket has versioning enabled
			{checkPresent: false, verEnabled: true, bucketType: typeCloud, evict: false},
			{checkPresent: false, verEnabled: true, bucketType: typeCloud, evict: true},
		}
	)

	for _, test := range tests {
		name := fmt.Sprintf(
			"checkPresent=%t/verEnabled=%t/type=%s/evict=%t",
			test.checkPresent, test.verEnabled, test.bucketType, test.evict,
		)
		t.Run(name, func(t *testing.T) {
			m := ioContext{
				t:         t,
				num:       10,
				fileSize:  512,
				fixedSize: true,
				prefix:    "props/obj-",
			}

			m.init(true /*cleanup*/)

			switch test.bucketType {
			case typeCloud:
				m.bck = cliBck
				tools.CheckSkip(t, &tools.SkipTestArgs{RemoteBck: true, Bck: m.bck})
			case typeLocal:
				tools.CreateBucket(t, proxyURL, m.bck, nil, true /*cleanup*/)
			case typeRemoteAIS:
				tools.CheckSkip(t, &tools.SkipTestArgs{RequiresRemoteCluster: true})
				m.bck.Ns.UUID = tools.RemoteCluster.UUID
				tools.CreateBucket(t, proxyURL, m.bck, nil, true /*cleanup*/)
			default:
				tassert.CheckFatal(t, fmt.Errorf("unknown type %q", test.bucketType))
			}

			defaultBckProp, err := api.HeadBucket(baseParams, m.bck, true /* don't add */)
			tassert.CheckFatal(t, err)

			_, err = api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
				Versioning: &cmn.VersionConfToSet{
					Enabled: apc.Ptr(test.verEnabled),
				},
			})
			if test.bucketType == typeCloud && test.verEnabled != defaultBckProp.Versioning.Enabled {
				s := "versioned"
				if !defaultBckProp.Versioning.Enabled {
					s = "unversioned"
				}
				tassert.Errorf(
					t, err != nil,
					"Cloud bucket %s is %s - expecting set-props to fail", m.bck, s)
			} else {
				tassert.CheckFatal(t, err)
			}
			if test.bucketType == typeCloud || test.bucketType == typeRemoteAIS {
				m.remotePuts(test.evict)
				defer api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
					Versioning: &cmn.VersionConfToSet{
						Enabled: apc.Ptr(defaultBckProp.Versioning.Enabled),
					},
				})
			} else {
				m.puts()
				m.gets(nil, false) // set the access time
			}

			bckProps, err := api.HeadBucket(baseParams, m.bck, true /* don't add */)
			tassert.CheckFatal(t, err)

			for _, objName := range m.objNames {
				tlog.Logf("checking %s props...\n", m.bck.Cname(objName))

				flt := apc.FltPresent
				if test.checkPresent {
					flt = apc.FltPresentNoProps
				}
				if test.bucketType != typeLocal && test.evict && !test.checkPresent {
					flt = apc.FltExistsOutside
				}

				props, err := api.HeadObject(baseParams, m.bck, objName, flt, false /*silent*/)
				if test.checkPresent {
					if test.bucketType != typeLocal && test.evict {
						tassert.Fatalf(t, err != nil,
							"object should be marked as 'not exists' (it is not cached)")
					} else {
						tassert.CheckFatal(t, err)
					}
					tassert.Errorf(t, props == nil, "props should be empty")
					continue
				}
				tassert.CheckFatal(t, err)

				tassert.Errorf(
					t, props.Bck.Provider == bckProps.Provider,
					"expected provider (%s) to be %s", props.Bck.Provider, bckProps.Provider,
				)
				tassert.Errorf(
					t, uint64(props.Size) == m.fileSize,
					"object size (%d) is different from expected (%d)", props.Size, m.fileSize,
				)
				if test.bucketType != typeLocal {
					if test.evict {
						tassert.Errorf(t, !props.Present, "object should not be present (cached)")
					} else {
						tassert.Errorf(t, props.Present, "object should be present (cached)")
					}
					if defaultBckProp.Versioning.Enabled && (test.verEnabled || test.evict) {
						tassert.Errorf(t, props.Ver != "", "%s object version should not be empty", test.bucketType)
					} else {
						tassert.Errorf(t, props.Ver == "" || defaultBckProp.Versioning.Enabled ||
							test.bucketType == typeRemoteAIS,
							"%s object version should be empty, have %q (enabled=%t)",
							test.bucketType, props.Ver, defaultBckProp.Versioning.Enabled)
					}
					if test.evict {
						tassert.Errorf(t, props.Atime == 0,
							"expected %s access time to be empty (not cached)", m.bck.Cname(objName))
					} else {
						tassert.Errorf(t, props.Atime != 0, "expected access time to be set (cached)")
					}
				} else {
					tassert.Errorf(t, props.Present, "object seems to be not present")
					tassert.Errorf(
						t, props.Mirror.Copies == 1,
						"number of copies (%d) is different than 1", props.Mirror.Copies,
					)
					if test.verEnabled {
						tassert.Errorf(
							t, props.Ver == "1",
							"object version (%s) different than expected (1)", props.Ver,
						)
					} else {
						tassert.Errorf(t, props.Ver == "", "object version should be empty")
					}
					tassert.Errorf(t, props.Atime != 0, "expected access time to be set")
				}
				tassert.Errorf(t, !props.EC.IsECCopy, "expected object not to be ec copy")
				tassert.Errorf(
					t, props.EC.DataSlices == 0,
					"expected data slices (%d) to be 0", props.EC.DataSlices,
				)
				tassert.Errorf(
					t, props.EC.ParitySlices == 0,
					"expected parity slices (%d) to be 0", props.EC.ParitySlices,
				)
			}
		})
	}
}

// testListObjects lists the bucket with the given message, logs the query, and
// fails the test if list-objects returns an error.
func testListObjects(t *testing.T, proxyURL string, bck cmn.Bck, msg *apc.LsoMsg) *cmn.LsoRes {
	if msg == nil {
		tlog.Logf("LIST %s []\n", bck)
	} else if msg.Prefix == "" && msg.PageSize == 0 && msg.ContinuationToken == "" {
		tlog.Logf("LIST %s [cached: %t]\n", bck, msg.IsFlagSet(apc.LsObjCached))
	} else {
		tlog.Logf("LIST %s [prefix: %q, page_size: %d, cached: %t, token: %q]\n",
			bck, msg.Prefix, msg.PageSize, msg.IsFlagSet(apc.LsObjCached), msg.ContinuationToken)
	}
	baseParams := tools.BaseAPIParams(proxyURL)
	resList, err := api.ListObjects(baseParams, bck, msg, api.ListArgs{})
	tassert.Fatalf(t, err == nil, "%s: list-objects failed: %v", bck, err)
	return resList
}