github.com/NVIDIA/aistore@v1.3.23-0.20240517131212-7df6609be51d/ais/test/regression_test.go (about)

     1  // Package integration_test.
     2  /*
     3   * Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved.
     4   */
     5  package integration_test
     6  
     7  import (
     8  	"fmt"
     9  	"io"
    10  	"math/rand"
    11  	"net/http"
    12  	"net/url"
    13  	"os"
    14  	"path"
    15  	"path/filepath"
    16  	"strconv"
    17  	"strings"
    18  	"sync"
    19  	"testing"
    20  	"time"
    21  
    22  	"github.com/NVIDIA/aistore/api"
    23  	"github.com/NVIDIA/aistore/api/apc"
    24  	"github.com/NVIDIA/aistore/cmn"
    25  	"github.com/NVIDIA/aistore/cmn/cos"
    26  	"github.com/NVIDIA/aistore/cmn/feat"
    27  	"github.com/NVIDIA/aistore/core"
    28  	"github.com/NVIDIA/aistore/core/meta"
    29  	"github.com/NVIDIA/aistore/stats"
    30  	"github.com/NVIDIA/aistore/tools"
    31  	"github.com/NVIDIA/aistore/tools/docker"
    32  	"github.com/NVIDIA/aistore/tools/readers"
    33  	"github.com/NVIDIA/aistore/tools/tassert"
    34  	"github.com/NVIDIA/aistore/tools/tlog"
    35  	"github.com/NVIDIA/aistore/xact"
    36  )
    37  
// Test pairs a human-readable test name with the function that runs it.
type Test struct {
	name   string          // display name of the test case
	method func(*testing.T) // test body to execute
}
    42  
// regressionTestData parameterizes a doBucketRegressionTest run.
type regressionTestData struct {
	bck        cmn.Bck // bucket under test
	renamedBck cmn.Bck // rename destination (used only when rename is true)
	numBuckets int     // ais bucket count captured before the rename
	rename     bool    // rename bck to renamedBck mid-test
	wait       bool    // wait for the rename xaction before issuing GETs
}
    50  
// Test-wide constants shared by the regression tests below.
const (
	rootDir        = "/tmp/ais"
	testBucketName = "TESTAISBUCKET"
)
    55  
    56  func TestListObjectsLocalGetLocation(t *testing.T) {
    57  	var (
    58  		m = ioContext{
    59  			t:         t,
    60  			num:       1000,
    61  			fileSize:  cos.KiB,
    62  			fixedSize: true,
    63  		}
    64  
    65  		targets    = make(map[string]struct{})
    66  		proxyURL   = tools.RandomProxyURL(t)
    67  		baseParams = tools.BaseAPIParams(proxyURL)
    68  		smap       = tools.GetClusterMap(t, proxyURL)
    69  	)
    70  
    71  	m.initAndSaveState(true /*cleanup*/)
    72  	m.expectTargets(1)
    73  
    74  	tools.CreateBucket(t, proxyURL, m.bck, nil, true /*cleanup*/)
    75  
    76  	m.puts()
    77  
    78  	msg := &apc.LsoMsg{Props: apc.GetPropsLocation}
    79  	lst, err := api.ListObjects(baseParams, m.bck, msg, api.ListArgs{Limit: int64(m.num)})
    80  	tassert.CheckFatal(t, err)
    81  
    82  	if len(lst.Entries) != m.num {
    83  		t.Errorf("Expected %d bucket list entries, found %d\n", m.num, len(lst.Entries))
    84  	}
    85  
    86  	j := 10
    87  	if len(lst.Entries) >= 200 {
    88  		j = 100
    89  	}
    90  	for i, e := range lst.Entries {
    91  		if e.Location == "" {
    92  			t.Fatalf("[%#v]: location is empty", e)
    93  		}
    94  		tname, _ := core.ParseObjLoc(e.Location)
    95  		tid := meta.N2ID(tname)
    96  		targets[tid] = struct{}{}
    97  		tsi := smap.GetTarget(tid)
    98  		url := tsi.URL(cmn.NetPublic)
    99  		baseParams := tools.BaseAPIParams(url)
   100  
   101  		oah, err := api.GetObject(baseParams, m.bck, e.Name, nil)
   102  		tassert.CheckFatal(t, err)
   103  		if uint64(oah.Size()) != m.fileSize {
   104  			t.Errorf("Expected filesize: %d, actual filesize: %d\n", m.fileSize, oah.Size())
   105  		}
   106  
   107  		if i%j == 0 {
   108  			if i == 0 {
   109  				tlog.Logln("Modifying config to enforce intra-cluster access, expecting errors...\n")
   110  			}
   111  			tools.SetClusterConfig(t, cos.StrKVs{"features": feat.EnforceIntraClusterAccess.String()})
   112  			t.Cleanup(func() {
   113  				tools.SetClusterConfig(t, cos.StrKVs{"features": "0"})
   114  			})
   115  
   116  			_, err = api.GetObject(baseParams, m.bck, e.Name, nil)
   117  			if err == nil {
   118  				tlog.Logln("Warning: expected error, got nil")
   119  			}
   120  			tools.SetClusterConfig(t, cos.StrKVs{"features": "0"})
   121  		}
   122  	}
   123  
   124  	if smap.CountActiveTs() != len(targets) { // The objects should have been distributed to all targets
   125  		t.Errorf("Expected %d different target URLs, actual: %d different target URLs",
   126  			smap.CountActiveTs(), len(targets))
   127  	}
   128  
   129  	// Ensure no target URLs are returned when the property is not requested
   130  	msg.Props = ""
   131  	lst, err = api.ListObjects(baseParams, m.bck, msg, api.ListArgs{Limit: int64(m.num)})
   132  	tassert.CheckFatal(t, err)
   133  
   134  	if len(lst.Entries) != m.num {
   135  		t.Errorf("Expected %d bucket list entries, found %d\n", m.num, len(lst.Entries))
   136  	}
   137  
   138  	for _, e := range lst.Entries {
   139  		if e.Location != "" {
   140  			t.Fatalf("[%#v]: location expected to be empty\n", e)
   141  		}
   142  	}
   143  }
   144  
   145  func TestListObjectsCloudGetLocation(t *testing.T) {
   146  	var (
   147  		m = ioContext{
   148  			t:        t,
   149  			bck:      cliBck,
   150  			num:      100,
   151  			fileSize: cos.KiB,
   152  		}
   153  		targets    = make(map[string]struct{})
   154  		bck        = cliBck
   155  		proxyURL   = tools.RandomProxyURL(t)
   156  		baseParams = tools.BaseAPIParams(proxyURL)
   157  		smap       = tools.GetClusterMap(t, proxyURL)
   158  	)
   159  
   160  	tools.CheckSkip(t, &tools.SkipTestArgs{RemoteBck: true, Bck: bck})
   161  
   162  	m.initAndSaveState(true /*cleanup*/)
   163  	m.expectTargets(2)
   164  
   165  	m.puts()
   166  
   167  	listObjectsMsg := &apc.LsoMsg{Props: apc.GetPropsLocation, Flags: apc.LsObjCached}
   168  	lst, err := api.ListObjects(baseParams, bck, listObjectsMsg, api.ListArgs{})
   169  	tassert.CheckFatal(t, err)
   170  
   171  	if len(lst.Entries) < m.num {
   172  		t.Errorf("Bucket %s has %d objects, expected %d", m.bck, len(lst.Entries), m.num)
   173  	}
   174  	j := 10
   175  	if len(lst.Entries) >= 200 {
   176  		j = 100
   177  	}
   178  	for i, e := range lst.Entries {
   179  		if e.Location == "" {
   180  			t.Fatalf("[%#v]: location is empty", e)
   181  		}
   182  		tmp := strings.Split(e.Location, apc.LocationPropSepa)
   183  		tid := meta.N2ID(tmp[0])
   184  		targets[tid] = struct{}{}
   185  		tsi := smap.GetTarget(tid)
   186  		url := tsi.URL(cmn.NetPublic)
   187  		baseParams := tools.BaseAPIParams(url)
   188  
   189  		oah, err := api.GetObject(baseParams, bck, e.Name, nil)
   190  		tassert.CheckFatal(t, err)
   191  		if uint64(oah.Size()) != m.fileSize {
   192  			t.Errorf("Expected fileSize: %d, actual fileSize: %d\n", m.fileSize, oah.Size())
   193  		}
   194  
   195  		if i%j == 0 {
   196  			if i == 0 {
   197  				tlog.Logln("Modifying config to enforce intra-cluster access, expecting errors...\n")
   198  			}
   199  			tools.SetClusterConfig(t, cos.StrKVs{"features": feat.EnforceIntraClusterAccess.String()})
   200  			_, err = api.GetObject(baseParams, m.bck, e.Name, nil)
   201  
   202  			if err == nil {
   203  				tlog.Logln("Warning: expected error, got nil")
   204  			}
   205  
   206  			tools.SetClusterConfig(t, cos.StrKVs{"features": "0"})
   207  		}
   208  	}
   209  
   210  	// The objects should have been distributed to all targets
   211  	if m.originalTargetCount != len(targets) {
   212  		t.Errorf("Expected %d different target URLs, actual: %d different target URLs", m.originalTargetCount, len(targets))
   213  	}
   214  
   215  	// Ensure no target URLs are returned when the property is not requested
   216  	listObjectsMsg.Props = ""
   217  	lst, err = api.ListObjects(baseParams, bck, listObjectsMsg, api.ListArgs{})
   218  	tassert.CheckFatal(t, err)
   219  
   220  	if len(lst.Entries) != m.num {
   221  		t.Errorf("Expected %d bucket list entries, found %d\n", m.num, len(lst.Entries))
   222  	}
   223  
   224  	for _, e := range lst.Entries {
   225  		if e.Location != "" {
   226  			t.Fatalf("[%#v]: location expected to be empty\n", e)
   227  		}
   228  	}
   229  }
   230  
   231  // 1. PUT file
   232  // 2. Corrupt the file
   233  // 3. GET file
   234  func TestGetCorruptFileAfterPut(t *testing.T) {
   235  	var (
   236  		m = ioContext{
   237  			t:        t,
   238  			num:      1,
   239  			fileSize: cos.KiB,
   240  		}
   241  
   242  		proxyURL   = tools.RandomProxyURL(t)
   243  		baseParams = tools.BaseAPIParams(proxyURL)
   244  	)
   245  
   246  	if docker.IsRunning() {
   247  		t.Skipf("%q requires setting xattrs, doesn't work with docker", t.Name())
   248  	}
   249  
   250  	m.init(true /*cleanup*/)
   251  	initMountpaths(t, proxyURL)
   252  
   253  	tools.CreateBucket(t, proxyURL, m.bck, nil, true /*cleanup*/)
   254  
   255  	m.puts()
   256  
   257  	// Test corrupting the file contents.
   258  	objName := m.objNames[0]
   259  	fqn := findObjOnDisk(m.bck, objName)
   260  	tlog.Logf("Corrupting object data %q: %s\n", objName, fqn)
   261  	err := os.WriteFile(fqn, []byte("this file has been corrupted"), cos.PermRWR)
   262  	tassert.CheckFatal(t, err)
   263  
   264  	_, err = api.GetObjectWithValidation(baseParams, m.bck, objName, nil)
   265  	tassert.Errorf(t, err != nil, "error is nil, expected error getting corrupted object")
   266  }
   267  
   268  func TestRegressionBuckets(t *testing.T) {
   269  	var (
   270  		bck = cmn.Bck{
   271  			Name:     testBucketName,
   272  			Provider: apc.AIS,
   273  		}
   274  		proxyURL = tools.RandomProxyURL(t)
   275  	)
   276  	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)
   277  	doBucketRegressionTest(t, proxyURL, regressionTestData{bck: bck})
   278  }
   279  
   280  func TestRenameBucket(t *testing.T) {
   281  	tools.CheckSkip(t, &tools.SkipTestArgs{Long: true})
   282  
   283  	var (
   284  		bck = cmn.Bck{
   285  			Name:     testBucketName,
   286  			Provider: apc.AIS,
   287  		}
   288  		proxyURL   = tools.RandomProxyURL(t)
   289  		baseParams = tools.BaseAPIParams(proxyURL)
   290  		renamedBck = cmn.Bck{
   291  			Name:     bck.Name + "_" + cos.GenTie(),
   292  			Provider: apc.AIS,
   293  		}
   294  	)
   295  	for _, wait := range []bool{true, false} {
   296  		t.Run(fmt.Sprintf("wait=%v", wait), func(t *testing.T) {
   297  			tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)
   298  			t.Cleanup(func() {
   299  				tools.DestroyBucket(t, proxyURL, renamedBck)
   300  			})
   301  
   302  			bcks, err := api.ListBuckets(baseParams, cmn.QueryBcks{Provider: bck.Provider}, apc.FltPresent)
   303  			tassert.CheckFatal(t, err)
   304  
   305  			regData := regressionTestData{
   306  				bck: bck, renamedBck: renamedBck,
   307  				numBuckets: len(bcks), rename: true, wait: wait,
   308  			}
   309  			doBucketRegressionTest(t, proxyURL, regData)
   310  		})
   311  	}
   312  }
   313  
//
// doBucketRe*
//

// doBucketRegressionTest PUTs objects into rtd.bck, optionally renames the bucket
// (retrying once if a previous rebalance was still running), GETs everything back,
// and deletes the objects. With rename && !wait, GETs deliberately race the
// in-flight rename/rebalance.
//
//nolint:gocritic // ignoring (regressionTestData) hugeParam
func doBucketRegressionTest(t *testing.T, proxyURL string, rtd regressionTestData) {
	var (
		m = ioContext{
			t:        t,
			bck:      rtd.bck,
			num:      2036,
			fileSize: cos.KiB,
		}
		baseParams = tools.BaseAPIParams(proxyURL)

		xid string
		err error
	)

	m.init(true /*cleanup*/)
	m.puts()

	if rtd.rename {
		xid, err = api.RenameBucket(baseParams, rtd.bck, rtd.renamedBck)
		if err != nil && ensurePrevRebalanceIsFinished(baseParams, err) {
			// can retry
			xid, err = api.RenameBucket(baseParams, rtd.bck, rtd.renamedBck)
		}
		tassert.CheckFatal(t, err)

		tlog.Logf("Renamed %s => %s\n", rtd.bck, rtd.renamedBck)
		if rtd.wait {
			// Block until the rename xaction completes and verify the result.
			postRenameWaitAndCheck(t, baseParams, rtd, m.num, m.objNames, xid)
		}
		// From here on read the objects via the destination bucket.
		m.bck = rtd.renamedBck
	}

	var getArgs *api.GetArgs
	if !rtd.wait {
		// GETs may race the rename/rebalance; silence expected transient errors.
		tlog.Logln("Warning: proceeding to GET while rebalance is running ('silence = true')")
		getArgs = &api.GetArgs{Query: url.Values{apc.QparamSilent: []string{"true"}}}
	}
	m.gets(getArgs, false /*with validation*/)

	if !rtd.rename || rtd.wait {
		m.del()
	} else {
		// The rename was started but not awaited above: wait now, then clean up.
		postRenameWaitAndCheck(t, baseParams, rtd, m.num, m.objNames, xid)
		m.del()
	}
}
   365  
// postRenameWaitAndCheck waits for the rename xaction xid to finish and then
// verifies that: the ais bucket count is unchanged, the source bucket is gone,
// the destination bucket exists, and it contains exactly numPuts objects.
//
//nolint:gocritic // ignoring (regressionTestData) hugeParam
func postRenameWaitAndCheck(t *testing.T, baseParams api.BaseParams, rtd regressionTestData, numPuts int, objNames []string, xid string) {
	xargs := xact.ArgsMsg{ID: xid, Kind: apc.ActMoveBck, Bck: rtd.renamedBck, Timeout: tools.RebalanceTimeout}
	_, err := api.WaitForXactionIC(baseParams, &xargs)
	if err != nil {
		// A 404 from the IC is tolerated on a single-target cluster (no global
		// rebalance to track). NOTE(review): `proxyURL` here is the package-level
		// variable, not a parameter - confirm that is intended.
		if herr, ok := err.(*cmn.ErrHTTP); ok && herr.Status == http.StatusNotFound {
			smap := tools.GetClusterMap(t, proxyURL)
			if smap.CountActiveTs() == 1 {
				err = nil
			}
		}
		tassert.CheckFatal(t, err)
	} else {
		tlog.Logf("rename-bucket[%s] %s => %s done\n", xid, rtd.bck, rtd.renamedBck)
	}
	bcks, err := api.ListBuckets(baseParams, cmn.QueryBcks{Provider: rtd.bck.Provider}, apc.FltPresent)
	tassert.CheckFatal(t, err)

	// Rename must not change the total number of buckets.
	if len(bcks) != rtd.numBuckets {
		t.Fatalf("wrong number of ais buckets (names) before and after rename (before: %d. after: %+v)",
			rtd.numBuckets, bcks)
	}

	renamedBucketExists := false
	for _, bck := range bcks {
		if bck.Name == rtd.renamedBck.Name {
			renamedBucketExists = true
		} else if bck.Name == rtd.bck.Name {
			t.Fatalf("original ais bucket %s still exists after rename", rtd.bck)
		}
	}

	if !renamedBucketExists {
		t.Fatalf("renamed ais bucket %s does not exist after rename", rtd.renamedBck)
	}

	// Every PUT object must be present (by base name) in the renamed bucket.
	lst, err := api.ListObjects(baseParams, rtd.renamedBck, nil, api.ListArgs{})
	tassert.CheckFatal(t, err)
	unique := make(map[string]bool)
	for _, e := range lst.Entries {
		base := filepath.Base(e.Name)
		unique[base] = true
	}
	if len(unique) != numPuts {
		// Log exactly which objects went missing before failing.
		for _, name := range objNames {
			if _, ok := unique[name]; !ok {
				tlog.Logf("not found: %s\n", name)
			}
		}
		t.Fatalf("wrong number of objects in the bucket %s renamed as %s (before: %d. after: %d)",
			rtd.bck, rtd.renamedBck, numPuts, len(unique))
	}
}
   419  
   420  func TestRenameObjects(t *testing.T) {
   421  	var (
   422  		renameStr  = "rename"
   423  		proxyURL   = tools.RandomProxyURL(t)
   424  		baseParams = tools.BaseAPIParams(proxyURL)
   425  		bck        = cmn.Bck{
   426  			Name:     t.Name(),
   427  			Provider: apc.AIS,
   428  		}
   429  	)
   430  
   431  	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)
   432  
   433  	objNames, _, err := tools.PutRandObjs(tools.PutObjectsArgs{
   434  		ProxyURL:  proxyURL,
   435  		Bck:       bck,
   436  		ObjCnt:    100,
   437  		CksumType: bck.DefaultProps(initialClusterConfig).Cksum.Type,
   438  	})
   439  	tassert.CheckFatal(t, err)
   440  
   441  	newObjNames := make([]string, 0, len(objNames))
   442  	for i, objName := range objNames {
   443  		newObjName := path.Join(renameStr, objName) + ".renamed" // objName fqn
   444  		newObjNames = append(newObjNames, newObjName)
   445  
   446  		err := api.RenameObject(baseParams, bck, objName, newObjName)
   447  		tassert.CheckFatal(t, err)
   448  
   449  		i++
   450  		if i%50 == 0 {
   451  			tlog.Logf("Renamed %s => %s\n", objName, newObjName)
   452  		}
   453  	}
   454  
   455  	// Check that renamed objects exist.
   456  	for _, newObjName := range newObjNames {
   457  		_, err := api.GetObject(baseParams, bck, newObjName, nil)
   458  		tassert.CheckError(t, err)
   459  	}
   460  }
   461  
   462  func TestObjectPrefix(t *testing.T) {
   463  	runProviderTests(t, func(t *testing.T, bck *meta.Bck) {
   464  		var (
   465  			proxyURL  = tools.RandomProxyURL(t)
   466  			b         = bck.Clone()
   467  			fileNames = prefixCreateFiles(t, proxyURL, b, bck.Props.Cksum.Type)
   468  		)
   469  		prefixLookup(t, proxyURL, b, fileNames)
   470  		prefixCleanup(t, proxyURL, b, fileNames)
   471  	})
   472  }
   473  
// TestReregisterMultipleTargets puts all-but-one targets into maintenance
// (without rebalance), PUTs objects, then re-activates the targets one by one
// while GETs are running, and finally checks GET errors and rebalance stats.
func TestReregisterMultipleTargets(t *testing.T) {
	tools.CheckSkip(t, &tools.SkipTestArgs{Long: true})

	var (
		// Per-target baseline stream counters, keyed by target ID.
		filesSentOrig = make(map[string]int64)
		filesRecvOrig = make(map[string]int64)
		bytesSentOrig = make(map[string]int64)
		bytesRecvOrig = make(map[string]int64)
		// Aggregate deltas computed at the end of the test.
		filesSent     int64
		filesRecv     int64
		bytesSent     int64
		bytesRecv     int64

		m = ioContext{
			t:   t,
			num: 10000,
		}
	)

	m.initAndSaveState(true /*cleanup*/)
	m.expectTargets(2)
	targetsToUnregister := m.originalTargetCount - 1

	// Step 0: Collect rebalance stats
	clusterStats := tools.GetClusterStats(t, m.proxyURL)
	for targetID, targetStats := range clusterStats.Target {
		filesSentOrig[targetID] = tools.GetNamedStatsVal(targetStats, stats.StreamsOutObjCount)
		filesRecvOrig[targetID] = tools.GetNamedStatsVal(targetStats, stats.StreamsInObjCount)
		bytesSentOrig[targetID] = tools.GetNamedStatsVal(targetStats, stats.StreamsOutObjSize)
		bytesRecvOrig[targetID] = tools.GetNamedStatsVal(targetStats, stats.StreamsInObjSize)
	}

	// Step 1: Unregister multiple targets
	removed := make(map[string]*meta.Snode, m.smap.CountActiveTs()-1)
	defer func() {
		// Safety net: re-activate any target still in maintenance at exit.
		// NOTE(review): `baseParams` here resolves to the package-level variable -
		// the local one is declared only further down, after this defer.
		var rebID string
		for _, tgt := range removed {
			rebID = m.stopMaintenance(tgt)
		}
		tools.WaitForRebalanceByID(t, baseParams, rebID)
	}()

	targets := m.smap.Tmap.ActiveNodes()
	for i := range targetsToUnregister {
		tlog.Logf("Put %s in maintenance (no rebalance)\n", targets[i].StringEx())
		args := &apc.ActValRmNode{DaemonID: targets[i].ID(), SkipRebalance: true}
		_, err := api.StartMaintenance(baseParams, args)
		tassert.CheckFatal(t, err)
		removed[targets[i].ID()] = targets[i]
	}

	// NOTE(review): `proxyURL` is the package-level variable here (m.proxyURL is
	// used everywhere else in this test) - confirm intended.
	smap, err := tools.WaitForClusterState(proxyURL, "remove targets",
		m.smap.Version, m.originalProxyCount, m.originalTargetCount-targetsToUnregister)
	tassert.CheckFatal(t, err)
	tlog.Logf("The cluster now has %d target(s)\n", smap.CountActiveTs())

	// Step 2: PUT objects into a newly created bucket
	tools.CreateBucket(t, m.proxyURL, m.bck, nil, true /*cleanup*/)
	m.puts()

	// Step 3: Start performing GET requests
	go m.getsUntilStop()

	// Step 4: Simultaneously reregister each
	wg := &sync.WaitGroup{}
	for i := range targetsToUnregister {
		wg.Add(1)
		go func(r int) {
			defer wg.Done()
			m.stopMaintenance(targets[r])
			delete(removed, targets[r].ID())
		}(i)
		time.Sleep(5 * time.Second) // wait some time before reregistering next target
	}
	wg.Wait()
	tlog.Logf("Stopping GETs...\n")
	m.stopGets()

	// This `:=` shadows the package-level baseParams for the remainder of the test.
	baseParams := tools.BaseAPIParams(m.proxyURL)
	tools.WaitForRebalAndResil(t, baseParams)

	// Compute the stream-traffic deltas caused by the rebalances above.
	clusterStats = tools.GetClusterStats(t, m.proxyURL)
	for targetID, targetStats := range clusterStats.Target {
		filesSent += tools.GetNamedStatsVal(targetStats, stats.StreamsOutObjCount) - filesSentOrig[targetID]
		filesRecv += tools.GetNamedStatsVal(targetStats, stats.StreamsInObjCount) - filesRecvOrig[targetID]
		bytesSent += tools.GetNamedStatsVal(targetStats, stats.StreamsOutObjSize) - bytesSentOrig[targetID]
		bytesRecv += tools.GetNamedStatsVal(targetStats, stats.StreamsInObjSize) - bytesRecvOrig[targetID]
	}

	// Step 5: Log rebalance stats
	tlog.Logf("Rebalance sent     %s in %d files\n", cos.ToSizeIEC(bytesSent, 2), filesSent)
	tlog.Logf("Rebalance received %s in %d files\n", cos.ToSizeIEC(bytesRecv, 2), filesRecv)

	m.ensureNoGetErrors()
	m.waitAndCheckCluState()
}
   570  
   571  func TestGetNodeStats(t *testing.T) {
   572  	proxyURL := tools.RandomProxyURL(t)
   573  	baseParams := tools.BaseAPIParams(proxyURL)
   574  	smap := tools.GetClusterMap(t, proxyURL)
   575  
   576  	proxy, err := smap.GetRandProxy(false)
   577  	tassert.CheckFatal(t, err)
   578  	tlog.Logf("%s:\n", proxy.StringEx())
   579  	stats, err := api.GetDaemonStats(baseParams, proxy)
   580  	tassert.CheckFatal(t, err)
   581  	tlog.Logf("%+v\n", stats)
   582  
   583  	target, err := smap.GetRandTarget()
   584  	tassert.CheckFatal(t, err)
   585  	tlog.Logf("%s:\n", target.StringEx())
   586  	stats, err = api.GetDaemonStats(baseParams, target)
   587  	tassert.CheckFatal(t, err)
   588  	tlog.Logf("%+v\n", stats)
   589  }
   590  
   591  func TestGetClusterStats(t *testing.T) {
   592  	proxyURL := tools.RandomProxyURL(t)
   593  	smap := tools.GetClusterMap(t, proxyURL)
   594  	cluStats := tools.GetClusterStats(t, proxyURL)
   595  
   596  	for tid, vStats := range cluStats.Target {
   597  		tsi := smap.GetNode(tid)
   598  		tname := tsi.StringEx()
   599  		tassert.Fatalf(t, tsi != nil, "%s is nil", tid)
   600  		tStats, err := api.GetDaemonStats(baseParams, tsi)
   601  		tassert.CheckFatal(t, err)
   602  
   603  		vCDF := vStats.TargetCDF
   604  		tCDF := tStats.TargetCDF
   605  		if vCDF.PctMax != tCDF.PctMax || vCDF.PctAvg != tCDF.PctAvg {
   606  			t.Errorf("%s: stats are different: [%+v] vs [%+v]\n", tname, vCDF, tCDF)
   607  		}
   608  		if len(vCDF.Mountpaths) != len(tCDF.Mountpaths) {
   609  			t.Errorf("%s: num mountpaths is different: [%+v] vs [%+v]\n", tname, vCDF, tCDF)
   610  		}
   611  		for mpath := range vCDF.Mountpaths {
   612  			tcdf := tCDF.Mountpaths[mpath]
   613  			s := tname + mpath
   614  			if tcdf.Capacity.Used != 0 {
   615  				tlog.Logf("%-30s %+v %+v\n", s, tcdf.Disks, tcdf.Capacity)
   616  			}
   617  		}
   618  	}
   619  }
   620  
// TestLRU lowers the space watermarks below the cluster's current usage so the
// LRU xaction must evict, runs LRU, and verifies each target evicted something.
// Original watermarks and LRU timings are restored on exit.
func TestLRU(t *testing.T) {
	var (
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)

		m = &ioContext{
			t:      t,
			bck:    cliBck,
			num:    100,
			prefix: t.Name(),
		}
	)

	tools.CheckSkip(t, &tools.SkipTestArgs{RemoteBck: true, Bck: m.bck})

	m.init(true /*cleanup*/)
	m.remotePuts(false /*evict*/)

	// Remember targets' watermarks
	var (
		usedPct      = int32(100)
		cluStats     = tools.GetClusterStats(t, proxyURL)
		filesEvicted = make(map[string]int64)
		bytesEvicted = make(map[string]int64)
	)

	// Find out min usage % across all targets
	for tid, v := range cluStats.Target {
		// Baseline eviction counters per target, to diff against after LRU runs.
		filesEvicted[tid] = tools.GetNamedStatsVal(v, "lru.evict.n")
		bytesEvicted[tid] = tools.GetNamedStatsVal(v, "lru.evict.size")
		for _, c := range v.TargetCDF.Mountpaths {
			usedPct = min(usedPct, c.PctUsed)
		}
	}

	// Set all watermarks just below current minimum usage => LRU must evict.
	var (
		lowWM     = usedPct - 5
		cleanupWM = lowWM - 1
		highWM    = usedPct - 2
	)
	if int(lowWM) < 2 {
		t.Skipf("The current space usage is too low (%d) for the LRU to be tested", lowWM)
		return // unreachable: Skipf ends the test; kept for readability
	}

	tlog.Logf("LRU: current min space usage in the cluster: %d%%\n", usedPct)
	tlog.Logf("setting 'space.lowm=%d' and 'space.highwm=%d'\n", lowWM, highWM)

	// All targets: set new watermarks; restore upon exit
	oconfig := tools.GetClusterConfig(t)
	defer func() {
		var (
			cleanupWMStr, _ = cos.ConvertToString(oconfig.Space.CleanupWM)
			lowWMStr, _     = cos.ConvertToString(oconfig.Space.LowWM)
			highWMStr, _    = cos.ConvertToString(oconfig.Space.HighWM)
		)
		tools.SetClusterConfig(t, cos.StrKVs{
			"space.cleanupwm":       cleanupWMStr,
			"space.lowwm":           lowWMStr,
			"space.highwm":          highWMStr,
			"lru.dont_evict_time":   oconfig.LRU.DontEvictTime.String(),
			"lru.capacity_upd_time": oconfig.LRU.CapacityUpdTime.String(),
		})
	}()

	// Cluster-wide reduce dont-evict-time
	cleanupWMStr, _ := cos.ConvertToString(cleanupWM)
	lowWMStr, _ := cos.ConvertToString(lowWM)
	highWMStr, _ := cos.ConvertToString(highWM)
	tools.SetClusterConfig(t, cos.StrKVs{
		"space.cleanupwm":       cleanupWMStr,
		"space.lowwm":           lowWMStr,
		"space.highwm":          highWMStr,
		"lru.dont_evict_time":   "0s",
		"lru.capacity_upd_time": "10s",
	})

	tlog.Logln("starting LRU...")
	xid, err := api.StartXaction(baseParams, &xact.ArgsMsg{Kind: apc.ActLRU}, "")
	tassert.CheckFatal(t, err)

	args := xact.ArgsMsg{ID: xid, Kind: apc.ActLRU, Timeout: tools.RebalanceTimeout}
	_, err = api.WaitForXactionIC(baseParams, &args)
	tassert.CheckFatal(t, err)

	// Check results
	tlog.Logln("checking the results...")
	cluStats = tools.GetClusterStats(t, proxyURL)
	for k, v := range cluStats.Target {
		// Each target must show a nonzero eviction delta vs. the baseline.
		diffFilesEvicted := tools.GetNamedStatsVal(v, "lru.evict.n") - filesEvicted[k]
		diffBytesEvicted := tools.GetNamedStatsVal(v, "lru.evict.size") - bytesEvicted[k]
		tlog.Logf(
			"Target %s: evicted %d objects - %s (%dB) total\n",
			k, diffFilesEvicted, cos.ToSizeIEC(diffBytesEvicted, 2), diffBytesEvicted,
		)

		if diffFilesEvicted == 0 {
			t.Errorf("Target %s: LRU failed to evict any objects", k)
		}
	}
}
   722  
// TestPrefetchList PUTs objects into a remote bucket, evicts them, prefetches
// them back by explicit name list, and verifies via the xaction snapshot and a
// cached-only listing that every object was prefetched.
func TestPrefetchList(t *testing.T) {
	var (
		m = ioContext{
			t:        t,
			bck:      cliBck,
			num:      100,
			fileSize: cos.KiB,
		}
		bck        = cliBck
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)
	)

	tools.CheckSkip(t, &tools.SkipTestArgs{Long: true, RemoteBck: true, Bck: bck})

	m.initAndSaveState(true /*cleanup*/)
	m.expectTargets(2)
	m.puts()

	// 2. Evict those objects from the cache and prefetch them
	tlog.Logf("Evicting and prefetching %d objects\n", len(m.objNames))
	xid, err := api.EvictMultiObj(baseParams, bck, m.objNames, "" /*template*/)
	if err != nil {
		t.Error(err)
	}

	args := xact.ArgsMsg{ID: xid, Kind: apc.ActEvictObjects, Timeout: tools.RebalanceTimeout}
	_, err = api.WaitForXactionIC(baseParams, &args)
	tassert.CheckFatal(t, err)

	// 3. Prefetch evicted objects
	{
		var msg apc.PrefetchMsg
		msg.ObjNames = m.objNames
		xid, err = api.Prefetch(baseParams, bck, msg) // xid now refers to the prefetch xaction
		if err != nil {
			t.Error(err)
		}
	}

	args = xact.ArgsMsg{ID: xid, Kind: apc.ActPrefetchObjects, Timeout: tools.RebalanceTimeout}
	_, err = api.WaitForXactionIC(baseParams, &args)
	tassert.CheckFatal(t, err)

	// 4. Ensure that all the prefetches occurred.
	xargs := xact.ArgsMsg{ID: xid, Timeout: tools.RebalanceTimeout}
	snaps, err := api.QueryXactionSnaps(baseParams, &xargs)
	tassert.CheckFatal(t, err)
	locObjs, _, _ := snaps.ObjCounts(xid)
	if locObjs != int64(m.num) {
		t.Errorf("did not prefetch all files: missing %d of %d", int64(m.num)-locObjs, m.num)
	}

	// Cross-check: a cached-only listing must now show all m.num objects.
	msg := &apc.LsoMsg{}
	msg.SetFlag(apc.LsObjCached)
	lst, err := api.ListObjects(baseParams, bck, msg, api.ListArgs{})
	tassert.CheckFatal(t, err)
	if len(lst.Entries) != m.num {
		t.Errorf("list-objects %s: expected %d, got %d", bck, m.num, len(lst.Entries))
	} else {
		tlog.Logf("list-objects %s: %d is correct\n", bck, len(m.objNames))
	}
}
   786  
   787  func TestDeleteList(t *testing.T) {
   788  	runProviderTests(t, func(t *testing.T, bck *meta.Bck) {
   789  		var (
   790  			err        error
   791  			prefix     = "__listrange/tstf-"
   792  			wg         = &sync.WaitGroup{}
   793  			objCnt     = 100
   794  			errCh      = make(chan error, objCnt)
   795  			files      = make([]string, 0, objCnt)
   796  			proxyURL   = tools.RandomProxyURL(t)
   797  			baseParams = tools.BaseAPIParams(proxyURL)
   798  			b          = bck.Clone()
   799  		)
   800  
   801  		// 1. Put files to delete
   802  		for i := range objCnt {
   803  			r, err := readers.NewRand(fileSize, bck.Props.Cksum.Type)
   804  			tassert.CheckFatal(t, err)
   805  
   806  			keyname := fmt.Sprintf("%s%d", prefix, i)
   807  
   808  			wg.Add(1)
   809  			go func() {
   810  				defer wg.Done()
   811  				tools.Put(proxyURL, b, keyname, r, errCh)
   812  			}()
   813  			files = append(files, keyname)
   814  		}
   815  		wg.Wait()
   816  		tassert.SelectErr(t, errCh, "put", true)
   817  		tlog.Logf("PUT done.\n")
   818  
   819  		// 2. Delete the objects
   820  		xid, err := api.DeleteMultiObj(baseParams, b, files, "" /*template*/)
   821  		tassert.CheckError(t, err)
   822  
   823  		args := xact.ArgsMsg{ID: xid, Kind: apc.ActDeleteObjects, Timeout: tools.RebalanceTimeout}
   824  		_, err = api.WaitForXactionIC(baseParams, &args)
   825  		tassert.CheckFatal(t, err)
   826  
   827  		// 3. Check to see that all the files have been deleted
   828  		msg := &apc.LsoMsg{Prefix: prefix}
   829  		bktlst, err := api.ListObjects(baseParams, b, msg, api.ListArgs{})
   830  		tassert.CheckFatal(t, err)
   831  		if len(bktlst.Entries) != 0 {
   832  			t.Errorf("Incorrect number of remaining files: %d, should be 0", len(bktlst.Entries))
   833  		}
   834  	})
   835  }
   836  
   837  func TestPrefetchRange(t *testing.T) {
   838  	var (
   839  		m = ioContext{
   840  			t:        t,
   841  			bck:      cliBck,
   842  			num:      200,
   843  			fileSize: cos.KiB,
   844  			prefix:   "regressionList/obj-",
   845  			ordered:  true,
   846  		}
   847  		proxyURL      = tools.RandomProxyURL(t)
   848  		baseParams    = tools.BaseAPIParams(proxyURL)
   849  		prefetchRange = "{1..150}"
   850  		bck           = cliBck
   851  	)
   852  
   853  	tools.CheckSkip(t, &tools.SkipTestArgs{Long: true, RemoteBck: true, Bck: bck})
   854  
   855  	m.initAndSaveState(true /*cleanup*/)
   856  
   857  	m.expectTargets(2)
   858  	m.puts()
   859  	// 1. Parse arguments
   860  	pt, err := cos.ParseBashTemplate(prefetchRange)
   861  	tassert.CheckFatal(t, err)
   862  	rangeMin, rangeMax := pt.Ranges[0].Start, pt.Ranges[0].End
   863  
   864  	// 2. Discover the number of items we expect to be prefetched
   865  	files := make([]string, 0)
   866  	for _, objName := range m.objNames {
   867  		oName := strings.TrimPrefix(objName, m.prefix)
   868  		if i, err := strconv.ParseInt(oName, 10, 64); err != nil {
   869  			continue
   870  		} else if (rangeMin == 0 && rangeMax == 0) || (i >= rangeMin && i <= rangeMax) {
   871  			files = append(files, objName)
   872  		}
   873  	}
   874  
   875  	// 3. Evict those objects from the cache, and then prefetch them
   876  	rng := fmt.Sprintf("%s%s", m.prefix, prefetchRange)
   877  	tlog.Logf("Evicting and prefetching %d objects (range: %s)\n", len(files), rng)
   878  	xid, err := api.EvictMultiObj(baseParams, bck, nil /*lst objnames*/, rng)
   879  	tassert.CheckError(t, err)
   880  	args := xact.ArgsMsg{ID: xid, Kind: apc.ActEvictObjects, Timeout: tools.RebalanceTimeout}
   881  	_, err = api.WaitForXactionIC(baseParams, &args)
   882  	tassert.CheckFatal(t, err)
   883  
   884  	{
   885  		var msg apc.PrefetchMsg
   886  		msg.Template = rng
   887  		xid, err = api.Prefetch(baseParams, bck, msg)
   888  		tassert.CheckError(t, err)
   889  		args = xact.ArgsMsg{ID: xid, Kind: apc.ActPrefetchObjects, Timeout: tools.RebalanceTimeout}
   890  		_, err = api.WaitForXactionIC(baseParams, &args)
   891  		tassert.CheckFatal(t, err)
   892  	}
   893  
   894  	// 4. Ensure all done
   895  	xargs := xact.ArgsMsg{ID: xid, Timeout: tools.RebalanceTimeout}
   896  	snaps, err := api.QueryXactionSnaps(baseParams, &xargs)
   897  	tassert.CheckFatal(t, err)
   898  	locObjs, _, _ := snaps.ObjCounts(xid)
   899  	if locObjs != int64(len(files)) {
   900  		t.Errorf("did not prefetch all files: missing %d of %d", int64(len(files))-locObjs, len(files))
   901  	}
   902  
   903  	msg := &apc.LsoMsg{Prefix: m.prefix}
   904  	msg.SetFlag(apc.LsObjCached)
   905  	lst, err := api.ListObjects(baseParams, bck, msg, api.ListArgs{})
   906  	tassert.CheckFatal(t, err)
   907  	if len(lst.Entries) < len(files) {
   908  		t.Errorf("list-objects %s/%s: expected %d, got %d", bck, m.prefix, len(files), len(lst.Entries))
   909  	} else {
   910  		var count int
   911  		for _, e := range lst.Entries {
   912  			s := e.Name[len(m.prefix):] // "obj-"
   913  			idx, err := strconv.Atoi(s)
   914  			if err == nil && idx >= 1 && idx <= 150 {
   915  				count++
   916  			}
   917  		}
   918  		if count != len(files) {
   919  			t.Errorf("list-objects %s/%s: expected %d, got %d", bck, m.prefix, len(files), count)
   920  		} else {
   921  			tlog.Logf("list-objects %s/%s: %d is correct\n", bck, m.prefix, len(files))
   922  		}
   923  	}
   924  }
   925  
   926  func TestDeleteRange(t *testing.T) {
   927  	runProviderTests(t, func(t *testing.T, bck *meta.Bck) {
   928  		var (
   929  			err            error
   930  			objCnt         = 100
   931  			quarter        = objCnt / 4
   932  			third          = objCnt / 3
   933  			smallrangesize = third - quarter + 1
   934  			prefix         = "__listrange/tstf-"
   935  			smallrange     = fmt.Sprintf("%s{%04d..%04d}", prefix, quarter, third)
   936  			bigrange       = fmt.Sprintf("%s{0000..%04d}", prefix, objCnt)
   937  			wg             = &sync.WaitGroup{}
   938  			errCh          = make(chan error, objCnt)
   939  			proxyURL       = tools.RandomProxyURL(t)
   940  			baseParams     = tools.BaseAPIParams(proxyURL)
   941  			b              = bck.Clone()
   942  		)
   943  
   944  		// 1. Put files to delete
   945  		for i := range objCnt {
   946  			r, err := readers.NewRand(fileSize, bck.Props.Cksum.Type)
   947  			tassert.CheckFatal(t, err)
   948  
   949  			wg.Add(1)
   950  			go func(i int) {
   951  				defer wg.Done()
   952  				tools.Put(proxyURL, b, fmt.Sprintf("%s%04d", prefix, i), r, errCh)
   953  			}(i)
   954  		}
   955  		wg.Wait()
   956  		tassert.SelectErr(t, errCh, "put", true)
   957  		tlog.Logf("PUT done.\n")
   958  
   959  		// 2. Delete the small range of objects
   960  		tlog.Logf("Delete in range %s\n", smallrange)
   961  		xid, err := api.DeleteMultiObj(baseParams, b, nil /*lst objnames*/, smallrange)
   962  		tassert.CheckError(t, err)
   963  		args := xact.ArgsMsg{ID: xid, Kind: apc.ActDeleteObjects, Timeout: tools.RebalanceTimeout}
   964  		_, err = api.WaitForXactionIC(baseParams, &args)
   965  		tassert.CheckFatal(t, err)
   966  
   967  		// 3. Check to see that the correct files have been deleted
   968  		msg := &apc.LsoMsg{Prefix: prefix}
   969  		bktlst, err := api.ListObjects(baseParams, b, msg, api.ListArgs{})
   970  		tassert.CheckFatal(t, err)
   971  		if len(bktlst.Entries) != objCnt-smallrangesize {
   972  			t.Errorf("Incorrect number of remaining files: %d, should be %d", len(bktlst.Entries), objCnt-smallrangesize)
   973  		}
   974  		filemap := make(map[string]*cmn.LsoEnt)
   975  		for _, en := range bktlst.Entries {
   976  			filemap[en.Name] = en
   977  		}
   978  		for i := range objCnt {
   979  			keyname := fmt.Sprintf("%s%04d", prefix, i)
   980  			_, ok := filemap[keyname]
   981  			if ok && i >= quarter && i <= third {
   982  				t.Errorf("File exists that should have been deleted: %s", keyname)
   983  			} else if !ok && (i < quarter || i > third) {
   984  				t.Errorf("File does not exist that should not have been deleted: %s", keyname)
   985  			}
   986  		}
   987  
   988  		tlog.Logf("Delete in range %s\n", bigrange)
   989  		// 4. Delete the big range of objects
   990  		xid, err = api.DeleteMultiObj(baseParams, b, nil /*lst objnames*/, bigrange)
   991  		tassert.CheckError(t, err)
   992  		args = xact.ArgsMsg{ID: xid, Kind: apc.ActDeleteObjects, Timeout: tools.RebalanceTimeout}
   993  		_, err = api.WaitForXactionIC(baseParams, &args)
   994  		tassert.CheckFatal(t, err)
   995  
   996  		// 5. Check to see that all the files have been deleted
   997  		bktlst, err = api.ListObjects(baseParams, b, msg, api.ListArgs{})
   998  		tassert.CheckFatal(t, err)
   999  		if len(bktlst.Entries) != 0 {
  1000  			t.Errorf("Incorrect number of remaining files: %d, should be 0", len(bktlst.Entries))
  1001  		}
  1002  	})
  1003  }
  1004  
  1005  // Testing only ais bucket objects since generally not concerned with cloud bucket object deletion
  1006  func TestStressDeleteRange(t *testing.T) {
  1007  	tools.CheckSkip(t, &tools.SkipTestArgs{Long: true})
  1008  
  1009  	const (
  1010  		numFiles   = 20000 // FIXME: must divide by 10 and by the numReaders
  1011  		numReaders = 200
  1012  	)
  1013  
  1014  	var (
  1015  		err           error
  1016  		wg            = &sync.WaitGroup{}
  1017  		errCh         = make(chan error, numFiles)
  1018  		proxyURL      = tools.RandomProxyURL(t)
  1019  		tenth         = numFiles / 10
  1020  		objNamePrefix = "__listrange/tstf-"
  1021  		partialRange  = fmt.Sprintf("%s{%d..%d}", objNamePrefix, 0, numFiles-tenth-1) // TODO: partial range with non-zero left boundary
  1022  		fullRange     = fmt.Sprintf("%s{0..%d}", objNamePrefix, numFiles)
  1023  		baseParams    = tools.BaseAPIParams(proxyURL)
  1024  		bck           = cmn.Bck{
  1025  			Name:     testBucketName,
  1026  			Provider: apc.AIS,
  1027  		}
  1028  		cksumType = bck.DefaultProps(initialClusterConfig).Cksum.Type
  1029  	)
  1030  
  1031  	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)
  1032  
  1033  	// 1. PUT
  1034  	tlog.Logln("putting objects...")
  1035  	for i := range numReaders {
  1036  		size := rand.Int63n(cos.KiB*128) + cos.KiB/3
  1037  		tassert.CheckFatal(t, err)
  1038  		reader, err := readers.NewRand(size, cksumType)
  1039  		tassert.CheckFatal(t, err)
  1040  
  1041  		wg.Add(1)
  1042  		go func(i int, reader readers.Reader) {
  1043  			defer wg.Done()
  1044  
  1045  			for j := range numFiles / numReaders {
  1046  				objName := fmt.Sprintf("%s%d", objNamePrefix, i*numFiles/numReaders+j)
  1047  				putArgs := api.PutArgs{
  1048  					BaseParams: baseParams,
  1049  					Bck:        bck,
  1050  					ObjName:    objName,
  1051  					Cksum:      reader.Cksum(),
  1052  					Reader:     reader,
  1053  				}
  1054  				_, err = api.PutObject(&putArgs)
  1055  				if err != nil {
  1056  					errCh <- err
  1057  				}
  1058  				reader.Seek(0, io.SeekStart)
  1059  			}
  1060  		}(i, reader)
  1061  	}
  1062  	wg.Wait()
  1063  	tassert.SelectErr(t, errCh, "put", true)
  1064  
  1065  	// 2. Delete a range of objects
  1066  	tlog.Logf("Deleting objects in range: %s\n", partialRange)
  1067  	xid, err := api.DeleteMultiObj(baseParams, bck, nil /*lst objnames*/, partialRange)
  1068  	tassert.CheckError(t, err)
  1069  	args := xact.ArgsMsg{ID: xid, Kind: apc.ActDeleteObjects, Timeout: tools.RebalanceTimeout}
  1070  	_, err = api.WaitForXactionIC(baseParams, &args)
  1071  	tassert.CheckFatal(t, err)
  1072  
  1073  	// 3. Check to see that correct objects have been deleted
  1074  	expectedRemaining := tenth
  1075  	msg := &apc.LsoMsg{Prefix: objNamePrefix}
  1076  	lst, err := api.ListObjects(baseParams, bck, msg, api.ListArgs{})
  1077  	tassert.CheckFatal(t, err)
  1078  	if len(lst.Entries) != expectedRemaining {
  1079  		t.Errorf("Incorrect number of remaining objects: %d, expected: %d",
  1080  			len(lst.Entries), expectedRemaining)
  1081  	}
  1082  
  1083  	objNames := make(map[string]*cmn.LsoEnt)
  1084  	for _, en := range lst.Entries {
  1085  		objNames[en.Name] = en
  1086  	}
  1087  	for i := range numFiles {
  1088  		objName := fmt.Sprintf("%s%d", objNamePrefix, i)
  1089  		_, ok := objNames[objName]
  1090  		if ok && i < numFiles-tenth {
  1091  			t.Errorf("%s exists (expected to be deleted)", objName)
  1092  		} else if !ok && i >= numFiles-tenth {
  1093  			t.Errorf("%s does not exist", objName)
  1094  		}
  1095  	}
  1096  
  1097  	// 4. Delete the entire range of objects
  1098  	tlog.Logf("Deleting objects in range: %s\n", fullRange)
  1099  	xid, err = api.DeleteMultiObj(baseParams, bck, nil /*lst objnames*/, fullRange)
  1100  	tassert.CheckError(t, err)
  1101  	args = xact.ArgsMsg{ID: xid, Kind: apc.ActDeleteObjects, Timeout: tools.RebalanceTimeout}
  1102  	_, err = api.WaitForXactionIC(baseParams, &args)
  1103  	tassert.CheckFatal(t, err)
  1104  
  1105  	// 5. Check to see that all files have been deleted
  1106  	msg = &apc.LsoMsg{Prefix: objNamePrefix}
  1107  	lst, err = api.ListObjects(baseParams, bck, msg, api.ListArgs{})
  1108  	tassert.CheckFatal(t, err)
  1109  	if len(lst.Entries) != 0 {
  1110  		t.Errorf("Incorrect number of remaining files: %d, should be 0", len(lst.Entries))
  1111  	}
  1112  }