github.com/NVIDIA/aistore@v1.3.23-0.20240517131212-7df6609be51d/ais/test/downloader_test.go (about)

     1  // Package integration_test.
     2  /*
     3   * Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved.
     4   */
     5  package integration_test
     6  
     7  import (
     8  	"context"
     9  	"fmt"
    10  	"os"
    11  	"reflect"
    12  	"strconv"
    13  	"strings"
    14  	"testing"
    15  	"time"
    16  
    17  	"github.com/NVIDIA/aistore/api"
    18  	"github.com/NVIDIA/aistore/api/apc"
    19  	"github.com/NVIDIA/aistore/cmn"
    20  	"github.com/NVIDIA/aistore/cmn/cos"
    21  	"github.com/NVIDIA/aistore/core/meta"
    22  	"github.com/NVIDIA/aistore/ext/dload"
    23  	"github.com/NVIDIA/aistore/tools"
    24  	"github.com/NVIDIA/aistore/tools/readers"
    25  	"github.com/NVIDIA/aistore/tools/tassert"
    26  	"github.com/NVIDIA/aistore/tools/tlog"
    27  	"github.com/NVIDIA/aistore/tools/trand"
    28  	"github.com/NVIDIA/aistore/xact"
    29  )
    30  
    31  // NOTE: TestDownload* can fail if link, content, version changes - should be super rare!
    32  
    33  const (
    34  	downloadDescAllPrefix = "downloader-test-integration"
    35  	downloadDescAllRegex  = "^" + downloadDescAllPrefix
    36  )
    37  
    38  var downloadDescCurPrefix = fmt.Sprintf("%s-%d-", downloadDescAllPrefix, os.Getpid())
    39  
    40  func generateDownloadDesc() string {
    41  	return downloadDescCurPrefix + time.Now().Format(time.RFC3339Nano)
    42  }
    43  
    44  func clearDownloadList(t *testing.T) {
    45  	listDownload, err := api.DownloadGetList(tools.BaseAPIParams(), downloadDescAllRegex, false /*onlyActive*/)
    46  	tassert.CheckFatal(t, err)
    47  
    48  	if len(listDownload) == 0 {
    49  		return
    50  	}
    51  
    52  	for _, v := range listDownload {
    53  		if v.JobRunning() {
    54  			tlog.Logf("Canceling: %v...\n", v.ID)
    55  			err := api.AbortDownload(tools.BaseAPIParams(), v.ID)
    56  			tassert.CheckFatal(t, err)
    57  		}
    58  	}
    59  
    60  	// Wait for the jobs to complete.
    61  	for running := true; running; {
    62  		time.Sleep(time.Second)
    63  		listDownload, err := api.DownloadGetList(tools.BaseAPIParams(), downloadDescAllRegex, false /*onlyActive*/)
    64  		tassert.CheckFatal(t, err)
    65  
    66  		running = false
    67  		for _, v := range listDownload {
    68  			if v.JobRunning() {
    69  				running = true
    70  				break
    71  			}
    72  		}
    73  	}
    74  
    75  	for _, v := range listDownload {
    76  		tlog.Logf("Removing: %v...\n", v.ID)
    77  		err := api.RemoveDownload(tools.BaseAPIParams(), v.ID)
    78  		tassert.CheckFatal(t, err)
    79  	}
    80  }
    81  
    82  func checkDownloadList(t *testing.T, expNumEntries ...int) {
    83  	defer clearDownloadList(t)
    84  
    85  	expNumEntriesVal := 1
    86  	if len(expNumEntries) > 0 {
    87  		expNumEntriesVal = expNumEntries[0]
    88  	}
    89  
    90  	listDownload, err := api.DownloadGetList(tools.BaseAPIParams(), downloadDescAllRegex, false /*onlyActive*/)
    91  	tassert.CheckFatal(t, err)
    92  	actEntries := len(listDownload)
    93  
    94  	if expNumEntriesVal != actEntries {
    95  		t.Fatalf("Incorrect # of downloader entries: expected %d, actual %d", expNumEntriesVal, actEntries)
    96  	}
    97  }
    98  
    99  func waitForDownload(t *testing.T, id string, timeout time.Duration) {
   100  	var (
   101  		total          time.Duration
   102  		sleep          = cos.ProbingFrequency(timeout)
   103  		found, aborted bool
   104  	)
   105  	for total < timeout {
   106  		time.Sleep(sleep)
   107  		total += sleep
   108  
   109  		what := id
   110  		finished := true
   111  		resp, err := api.DownloadStatus(tools.BaseAPIParams(), id, true)
   112  		if err == nil {
   113  			found = true
   114  			aborted = resp.Job.Aborted
   115  			what = resp.Job.String()
   116  			if l := len(resp.Errs); l > 0 {
   117  				what = fmt.Sprintf("%s (errs: %v)", resp.Job.String(), resp.Errs[:min(2, l)])
   118  			}
   119  			if !resp.JobFinished() {
   120  				finished = false
   121  			}
   122  		}
   123  		if finished && found {
   124  			return
   125  		}
   126  		if total == 5*time.Second {
   127  			tlog.Logf("still waiting for download [%s] to finish\n", what)
   128  			sleep *= 2
   129  		}
   130  		if total >= timeout && !aborted {
   131  			t.Errorf("timed out waiting %v for download [%s] to finish", timeout, what)
   132  		}
   133  	}
   134  }
   135  
   136  func checkDownloadedObjects(t *testing.T, id string, bck cmn.Bck, objects []string) {
   137  	var (
   138  		proxyURL   = tools.RandomProxyURL(t)
   139  		baseParams = tools.BaseAPIParams(proxyURL)
   140  	)
   141  	resp, err := api.DownloadStatus(baseParams, id, false /*onlyActive*/)
   142  	tassert.CheckFatal(t, err)
   143  	tassert.Errorf(
   144  		t, resp.FinishedCnt == len(objects),
   145  		"finished task mismatch (got: %d, expected: %d)",
   146  		resp.FinishedCnt, len(objects),
   147  	)
   148  
   149  	objs, err := tools.ListObjectNames(proxyURL, bck, "", 0, true /*cached*/)
   150  	tassert.CheckFatal(t, err)
   151  	tassert.Errorf(
   152  		t, reflect.DeepEqual(objs, objects),
   153  		"objects mismatch (got: %+v, expected: %+v)", objs, objects,
   154  	)
   155  }
   156  
   157  func downloadObject(t *testing.T, bck cmn.Bck, objName, link string, expectedSkipped, bucketExists bool) {
   158  	id, err := api.DownloadSingle(tools.BaseAPIParams(), generateDownloadDesc(), bck, objName, link)
   159  	tassert.CheckError(t, err)
   160  	if !bucketExists {
   161  		// (virtualized & shared testing env vs metasync propagation time)
   162  		time.Sleep(6 * time.Second)
   163  	}
   164  	waitForDownload(t, id, time.Minute)
   165  	status, err := api.DownloadStatus(tools.BaseAPIParams(), id, false /*onlyActive*/)
   166  	tassert.CheckFatal(t, err)
   167  	tassert.Errorf(t, status.ErrorCnt == 0, "expected no errors during download, got: %d (errs: %v)", status.ErrorCnt, status.Errs)
   168  	if expectedSkipped {
   169  		tassert.Errorf(t, status.SkippedCnt == 1, "expected object to be [skipped: %t]", expectedSkipped)
   170  	} else {
   171  		tassert.Errorf(t, status.FinishedCnt == 1, "expected object to be finished")
   172  	}
   173  }
   174  
   175  //nolint:gocritic // ignoring (dload.BackendBody) hugeParam
   176  func downloadObjectRemote(t *testing.T, body dload.BackendBody, expectedFinished, expectedSkipped int) {
   177  	baseParams := tools.BaseAPIParams()
   178  	body.Description = generateDownloadDesc()
   179  	id, err := api.DownloadWithParam(baseParams, dload.TypeBackend, body)
   180  	tassert.CheckFatal(t, err)
   181  
   182  	waitForDownload(t, id, time.Minute)
   183  
   184  	resp, err := api.DownloadStatus(baseParams, id, false /*onlyActive*/)
   185  	tassert.CheckFatal(t, err)
   186  
   187  	if resp.FinishedCnt > expectedFinished {
   188  		tlog.Logf("Warning: the bucket has extra (leftover?) objects (got: %d, expected: %d)\n", resp.FinishedCnt, expectedFinished)
   189  	} else {
   190  		tassert.Errorf(t, resp.FinishedCnt == expectedFinished,
   191  			"num objects mismatch (got: %d, expected: %d)", resp.FinishedCnt, expectedFinished)
   192  	}
   193  	tassert.Errorf(t, resp.SkippedCnt >= expectedSkipped,
   194  		"skipped objects mismatch (got: %d, expected: %d)", resp.SkippedCnt, expectedSkipped)
   195  }
   196  
// abortDownload aborts download job `id`, waits up to 16s for it to stop, and
// asserts that the job ends up marked aborted and finished with no in-flight
// tasks. If the job still reports unfinished after the first abort, the abort
// is retried once (known downloader issue - see TODO below).
func abortDownload(t *testing.T, id string) {
	baseParams := tools.BaseAPIParams()

	err := api.AbortDownload(baseParams, id)
	tassert.CheckFatal(t, err)

	waitForDownload(t, id, 16*time.Second)

	status, err := api.DownloadStatus(baseParams, id, false /*onlyActive*/)
	tassert.CheckFatal(t, err)
	tassert.Fatalf(t, status.Aborted, "download was not marked aborted")

	if !status.JobFinished() {
		// TODO -- FIXME: ext/dload to fix
		tlog.Logf("job [%s] hasn't finished - retrying api.AbortDownload  ************* (TODO)\n", status.Job.String())

		// NOTE: `err` below is a new (shadowed) variable scoped to this block;
		// `status`, however, is the outer variable, so the final assertions
		// after the block see the refreshed status.
		err := api.AbortDownload(baseParams, id)
		tassert.CheckFatal(t, err)

		time.Sleep(4 * time.Second)

		status, err = api.DownloadStatus(baseParams, id, false /*onlyActive*/)
		tassert.CheckFatal(t, err)
	}

	tassert.Fatalf(t, status.JobFinished(), "download should be finished")
	tassert.Fatalf(t, len(status.CurrentTasks) == 0, "current tasks should be empty")
}
   225  
   226  func verifyProps(t *testing.T, bck cmn.Bck, objName string, size int64, version string) *cmn.ObjectProps {
   227  	objProps, err := api.HeadObject(tools.BaseAPIParams(), bck, objName, apc.FltPresent, false /*silent*/)
   228  	tassert.CheckFatal(t, err)
   229  
   230  	tassert.Errorf(
   231  		t, objProps.Size == size,
   232  		"size mismatch (%d vs %d)", objProps.Size, size,
   233  	)
   234  	tassert.Errorf(
   235  		t, objProps.Ver == version || objProps.Ver == "",
   236  		"version mismatch (%s vs %s)", objProps.Ver, version,
   237  	)
   238  	return objProps
   239  }
   240  
// TestDownloadSingle exercises the single-object download lifecycle across
// providers: schedule, abort, status-after-abort, remove (and double-remove),
// then a successful small download verified against the bucket listing.
func TestDownloadSingle(t *testing.T) {
	var (
		proxyURL      = tools.RandomProxyURL(t)
		baseParams    = tools.BaseAPIParams(proxyURL)
		objName       = "object"
		objNameSecond = "object-second"

		// Links below don't contain protocols to test that no error occurs
		// in case they are missing.
		linkLarge = "storage.googleapis.com/nvdata-openimages/openimages-train-000001.tar"
		linkSmall = "storage.googleapis.com/minikube/iso/minikube-v0.23.0.iso.sha256"
	)

	runProviderTests(t, func(t *testing.T, bck *meta.Bck) {
		m := ioContext{
			t:   t,
			bck: bck.Clone(),
		}

		m.init(true /*cleanup*/)
		defer m.del()

		clearDownloadList(t)

		// Schedule the first (large) object - intended to be aborted below.
		id, err := api.DownloadSingle(baseParams, generateDownloadDesc(), m.bck, objName, linkLarge)
		tassert.CheckError(t, err)

		time.Sleep(time.Second)

		// Schedule second object.
		idSecond, err := api.DownloadSingle(baseParams, generateDownloadDesc(), m.bck, objNameSecond, linkLarge)
		tassert.CheckError(t, err)

		// Cancel second object.
		err = api.AbortDownload(baseParams, idSecond)
		tassert.CheckError(t, err)

		// Cancel first object.
		abortDownload(t, id)

		time.Sleep(time.Second)

		// Check if the status is still available after some time.
		if resp, err := api.DownloadStatus(baseParams, id, false /*onlyActive*/); err != nil {
			t.Errorf("got error when getting status for link that is not being downloaded: %v", err)
		} else if !resp.Aborted {
			t.Errorf("canceled link not marked: %v", resp)
		}

		// Aborting an already-aborted job must not error.
		err = api.AbortDownload(baseParams, id)
		tassert.CheckError(t, err)

		err = api.RemoveDownload(baseParams, id)
		tassert.CheckError(t, err)

		// Removing the same job twice must fail.
		err = api.RemoveDownload(baseParams, id)
		tassert.Errorf(t, err != nil, "expected error when removing non-existent task")

		// Finally, a small download that should complete successfully.
		id, err = api.DownloadSingle(baseParams, generateDownloadDesc(), m.bck, objName, linkSmall)
		tassert.CheckError(t, err)

		waitForDownload(t, id, 30*time.Second)
		checkDownloadedObjects(t, id, m.bck, []string{objName})

		// Two jobs remain listed: the aborted second one and the finished one.
		checkDownloadList(t, 2)
	})
}
   308  
// TestDownloadRange downloads a brace-expansion template ({0..4}) of five
// small objects and verifies the bucket listing matches the expected names.
func TestDownloadRange(t *testing.T) {
	var (
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)

		template        = "storage.googleapis.com/minikube/iso/minikube-v0.23.{0..4}.iso.sha256"
		expectedObjects = []string{
			"minikube-v0.23.0.iso.sha256",
			"minikube-v0.23.1.iso.sha256",
			"minikube-v0.23.2.iso.sha256",
			"minikube-v0.23.3.iso.sha256",
			"minikube-v0.23.4.iso.sha256",
		}
	)

	runProviderTests(t, func(t *testing.T, bck *meta.Bck) {
		m := ioContext{
			t:   t,
			bck: bck.Clone(),
		}

		m.init(true /*cleanup*/)
		defer m.del()

		clearDownloadList(t)

		id, err := api.DownloadRange(baseParams, generateDownloadDesc(), m.bck, template)
		tassert.CheckFatal(t, err)

		waitForDownload(t, id, 10*time.Second)
		checkDownloadedObjects(t, id, m.bck, expectedObjects)

		checkDownloadList(t)
	})
}
   344  
// TestDownloadMultiRange downloads a template with two brace expansions
// ({23..25..2} x {0..1} => 4 objects) and verifies the resulting listing.
func TestDownloadMultiRange(t *testing.T) {
	var (
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)

		template        = "storage.googleapis.com/minikube/iso/minikube-v0.{23..25..2}.{0..1}.iso.sha256"
		expectedObjects = []string{
			"minikube-v0.23.0.iso.sha256",
			"minikube-v0.23.1.iso.sha256",
			"minikube-v0.25.0.iso.sha256",
			"minikube-v0.25.1.iso.sha256",
		}
	)

	runProviderTests(t, func(t *testing.T, bck *meta.Bck) {
		m := ioContext{
			t:   t,
			bck: bck.Clone(),
		}

		m.init(true /*cleanup*/)
		defer m.del()
		clearDownloadList(t)

		id, err := api.DownloadRange(baseParams, generateDownloadDesc(), m.bck, template)
		tassert.CheckFatal(t, err)

		waitForDownload(t, id, 10*time.Second)
		checkDownloadedObjects(t, id, m.bck, expectedObjects)

		checkDownloadList(t)
	})
}
   378  
// TestDownloadMultiMap downloads a map of objName->URL pairs and verifies the
// bucket ends up containing exactly the map's keys.
func TestDownloadMultiMap(t *testing.T) {
	var (
		mapping = map[string]string{
			"ais": "https://raw.githubusercontent.com/NVIDIA/aistore/main/README.md",
			"k8s": "https://raw.githubusercontent.com/kubernetes/kubernetes/master/README.md",
		}
		expectedObjects = []string{"ais", "k8s"}
	)

	runProviderTests(t, func(t *testing.T, bck *meta.Bck) {
		m := ioContext{
			t:   t,
			bck: bck.Clone(),
		}

		m.init(true /*cleanup*/)
		defer m.del()
		clearDownloadList(t)

		id, err := api.DownloadMulti(tools.BaseAPIParams(), generateDownloadDesc(), m.bck, mapping)
		tassert.CheckFatal(t, err)

		waitForDownload(t, id, 30*time.Second)
		checkDownloadedObjects(t, id, m.bck, expectedObjects)

		checkDownloadList(t)
	})
}
   407  
// TestDownloadMultiList downloads a plain list of URLs; object names are
// derived from the links (note: the second link carries a query string -
// presumably stripped by the downloader when naming, hence "LICENSE").
func TestDownloadMultiList(t *testing.T) {
	var (
		l = []string{
			"https://raw.githubusercontent.com/NVIDIA/aistore/main/README.md",
			"https://raw.githubusercontent.com/kubernetes/kubernetes/master/LICENSE?query=values",
		}
		expectedObjs = []string{"LICENSE", "README.md"}
		proxyURL     = tools.RandomProxyURL(t)
		baseParams   = tools.BaseAPIParams(proxyURL)
	)

	runProviderTests(t, func(t *testing.T, bck *meta.Bck) {
		m := ioContext{
			t:   t,
			bck: bck.Clone(),
		}

		m.init(true /*cleanup*/)
		defer m.del()
		clearDownloadList(t)

		id, err := api.DownloadMulti(baseParams, generateDownloadDesc(), m.bck, l)
		tassert.CheckFatal(t, err)

		waitForDownload(t, id, 30*time.Second)
		checkDownloadedObjects(t, id, m.bck, expectedObjs)

		checkDownloadList(t)
	})
}
   438  
   439  func TestDownloadTimeout(t *testing.T) {
   440  	var (
   441  		bck = cmn.Bck{
   442  			Name:     testBucketName,
   443  			Provider: apc.AIS,
   444  		}
   445  		objName    = "object"
   446  		link       = "https://storage.googleapis.com/nvdata-openimages/openimages-train-000001.tar"
   447  		proxyURL   = tools.RandomProxyURL(t)
   448  		baseParams = tools.BaseAPIParams(proxyURL)
   449  	)
   450  
   451  	clearDownloadList(t)
   452  
   453  	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)
   454  
   455  	body := dload.SingleBody{
   456  		SingleObj: dload.SingleObj{
   457  			ObjName: objName,
   458  			Link:    link,
   459  		},
   460  	}
   461  	body.Bck = bck
   462  	body.Description = generateDownloadDesc()
   463  	body.Timeout = "1ms" // super small timeout to see if the request will be canceled
   464  
   465  	id, err := api.DownloadWithParam(baseParams, dload.TypeSingle, body)
   466  	tassert.CheckFatal(t, err)
   467  
   468  	time.Sleep(time.Second)
   469  
   470  	status, err := api.DownloadStatus(baseParams, id, false /*onlyActive*/)
   471  	tassert.CheckFatal(t, err)
   472  
   473  	objErr := status.Errs[0]
   474  	tassert.Fatalf(t, status.ErrorCnt == 1, "expected task to be marked as an error")
   475  	tassert.Errorf(
   476  		t, objErr.Name == objName,
   477  		"unexpected name for the object (expected: %q, got: %q)", objName, objErr.Name,
   478  	)
   479  	tassert.Errorf(
   480  		t, strings.Contains(objErr.Err, "deadline exceeded") || strings.Contains(objErr.Err, "timeout"),
   481  		"error mismatch (expected: %q, got: %q)", context.DeadlineExceeded, objErr.Err,
   482  	)
   483  
   484  	checkDownloadedObjects(t, id, bck, []string{})
   485  
   486  	checkDownloadList(t)
   487  }
   488  
// TestDownloadRemote populates a remote bucket, evicts the objects, and then
// uses a backend (TypeBackend) download to re-fetch them - twice: once to
// completion (src==dst and src!=dst variants) and once aborted mid-flight.
func TestDownloadRemote(t *testing.T) {
	var (
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)

		fileCnt = 5
		prefix  = "imagenet/imagenet_train-"
		suffix  = ".tgz"
	)

	tests := []struct {
		name   string
		srcBck cmn.Bck
		dstBck cmn.Bck
	}{
		{
			name:   "src==dst",
			srcBck: cliBck,
			dstBck: cliBck,
		},
		{
			name:   "src!=dst",
			srcBck: cliBck,
			dstBck: cmn.Bck{
				Name:     trand.String(5),
				Provider: apc.AIS,
			},
		},
	}
	defer tools.CleanupRemoteBucket(t, proxyURL, cliBck, prefix)
	for i := range tests {
		test := tests[i]
		t.Run(test.name, func(t *testing.T) {
			tools.CheckSkip(t, &tools.SkipTestArgs{Long: true, RemoteBck: true, Bck: test.srcBck})

			clearDownloadList(t)

			if test.dstBck.IsAIS() {
				tools.CreateBucket(t, proxyURL, test.dstBck, nil, true /*cleanup*/)
			}

			// Start from a clean slate in the remote source bucket.
			tools.CleanupRemoteBucket(t, proxyURL, test.srcBck, prefix)

			tlog.Logf("putting %d objects into remote bucket %s...\n", fileCnt, test.srcBck)

			expectedObjs := make([]string, 0, fileCnt)
			for i := range fileCnt {
				reader, err := readers.NewRand(256, cos.ChecksumNone)
				tassert.CheckFatal(t, err)

				// Zero-padded 5-digit index, e.g. "imagenet/imagenet_train-00000.tgz".
				objName := fmt.Sprintf("%s%0*d%s", prefix, 5, i, suffix)
				_, err = api.PutObject(&api.PutArgs{
					BaseParams: baseParams,
					Bck:        test.srcBck,
					ObjName:    objName,
					Reader:     reader,
				})
				tassert.CheckFatal(t, err)

				expectedObjs = append(expectedObjs, objName)
			}

			tlog.Logf("(1) evicting a _list_ of objects from remote bucket %s...\n", test.srcBck)
			xid, err := api.EvictMultiObj(baseParams, test.srcBck, expectedObjs, "" /*template*/)
			tassert.CheckFatal(t, err)
			args := xact.ArgsMsg{ID: xid, Kind: apc.ActEvictObjects, Timeout: tools.RebalanceTimeout}
			_, err = api.WaitForXactionIC(baseParams, &args)
			tassert.CheckFatal(t, err)

			if test.dstBck.IsAIS() {
				// For the ais destination, connect it to the remote source as its backend.
				tools.CheckSkip(t, &tools.SkipTestArgs{CloudBck: true, Bck: test.srcBck})
				tools.SetBackendBck(t, baseParams, test.dstBck, test.srcBck)
			}

			tlog.Logf("starting remote download => %s...\n", test.dstBck)
			id, err := api.DownloadWithParam(baseParams, dload.TypeBackend, dload.BackendBody{
				Base: dload.Base{
					Bck:         test.dstBck,
					Description: generateDownloadDesc(),
				},
				Prefix: prefix,
				Suffix: suffix,
			})
			tassert.CheckFatal(t, err)

			tlog.Logln("waiting for remote download...")
			waitForDownload(t, id, time.Minute)

			tlog.Logf("listing %s...\n", test.dstBck)
			objs, err := tools.ListObjectNames(proxyURL, test.dstBck, prefix, 0, true /*cached*/)
			tassert.CheckFatal(t, err)
			tassert.Errorf(t, reflect.DeepEqual(objs, expectedObjs), "expected objs: %s, got: %s", expectedObjs, objs)

			// Test cancellation
			tlog.Logf("(2) evicting a _list_ of objects from remote bucket %s...\n", test.srcBck)
			xid, err = api.EvictMultiObj(baseParams, test.srcBck, expectedObjs, "" /*template*/)
			tassert.CheckFatal(t, err)
			args = xact.ArgsMsg{ID: xid, Kind: apc.ActEvictObjects, Timeout: tools.RebalanceTimeout}
			status, err := api.WaitForXactionIC(baseParams, &args)
			tassert.CheckFatal(t, err)
			// NOTE(review): `err` was already fatal-checked just above, so the
			// CheckFatal in the equal-buckets branch below is redundant - the
			// src!=dst branch's intent is to find eviction errors in status.ErrMsg.
			if test.srcBck.Equal(&test.dstBck) {
				tassert.CheckFatal(t, err)
			} else {
				// this time downloaded a different bucket - test.srcBck remained empty
				tassert.Errorf(t, status.ErrMsg != "", "expecting errors when when not finding listed objects")
			}

			tlog.Logln("starting remote download...")
			id, err = api.DownloadWithParam(baseParams, dload.TypeBackend, dload.BackendBody{
				Base: dload.Base{
					Bck:         test.dstBck,
					Description: generateDownloadDesc(),
				},
				Prefix: prefix,
				Suffix: suffix,
			})
			tassert.CheckFatal(t, err)

			// Let the job start before aborting it.
			time.Sleep(500 * time.Millisecond)

			tlog.Logln("aborting remote download...")
			err = api.AbortDownload(baseParams, id)
			tassert.CheckFatal(t, err)

			resp, err := api.DownloadStatus(baseParams, id, false /*onlyActive*/)
			tassert.CheckFatal(t, err)
			tassert.Errorf(t, resp.Aborted, "canceled remote download %v not marked", id)

			// Two jobs were created in this subtest: one finished, one aborted.
			checkDownloadList(t, 2)
		})
	}
}
   621  
// TestDownloadStatus downloads one short and one long file concurrently and,
// once the short one lands, checks that the job status reports 2 total tasks,
// the short one finished, and the long one still in CurrentTasks.
func TestDownloadStatus(t *testing.T) {
	var (
		bck = cmn.Bck{
			Name:     testBucketName,
			Provider: apc.AIS,
		}
		baseParams = tools.BaseAPIParams()
		m          = ioContext{t: t}
	)

	m.initAndSaveState(true /*cleanup*/)
	m.expectTargets(2)

	var (
		shortFileName = "shortFile"
		// Ensure the two objects land on different targets (avoids ordering flakes).
		longFileName = tools.GenerateNotConflictingObjectName(shortFileName, "longFile", bck, m.smap)
	)

	// NOTE Dec 1/23: gs://nvdata-openimages started to return 403

	files := map[string]string{
		shortFileName: "https://raw.githubusercontent.com/NVIDIA/aistore/main/README.md",
		// longFileName:  "https://storage.googleapis.com/nvdata-openimages/openimages-train-000001.tar",
		longFileName: "https://raw.githubusercontent.com/NVIDIA/aistore/main/docs/images/ais-s3-tf.gif",
	}

	clearDownloadList(t)

	tools.CreateBucket(t, m.proxyURL, bck, nil, true /*cleanup*/)

	id, err := api.DownloadMulti(baseParams, generateDownloadDesc(), bck, files)
	tassert.CheckFatal(t, err)

	// Wait for the short file to be downloaded
	err = tools.WaitForObjectToBeDowloaded(baseParams, bck, shortFileName, 13*time.Second)
	tassert.CheckFatal(t, err)

	resp, err := api.DownloadStatus(baseParams, id, false /*onlyActive*/)
	tassert.CheckFatal(t, err)

	tassert.Errorf(t, resp.Total == 2, "expected %d objects, got %d", 2, resp.Total)

	// TODO -- FIXME: see NOTE above
	// The "long" file is no longer large, so these checks are warnings only.
	if resp.FinishedCnt != 1 {
		tlog.Logf("Warning: expected the short file to be downloaded (%d)\n", resp.FinishedCnt)
	}
	if len(resp.CurrentTasks) != 1 {
		tlog.Logf("Warning: did not expect the long file to be already downloaded (%d)\n", len(resp.CurrentTasks))
	} else {
		tassert.Fatalf(
			t, resp.CurrentTasks[0].Name == longFileName,
			"invalid file name in status message, expected: %s, got: %s",
			longFileName, resp.CurrentTasks[0].Name,
		)
	}
	checkDownloadList(t)
}
   679  
   680  func TestDownloadStatusError(t *testing.T) {
   681  	tools.CheckSkip(t, &tools.SkipTestArgs{Long: true})
   682  
   683  	var (
   684  		bck = cmn.Bck{
   685  			Name:     testBucketName,
   686  			Provider: apc.AIS,
   687  		}
   688  		files = map[string]string{
   689  			"invalidURL":   "http://some.invalid.url",
   690  			"notFoundFile": "https://google.com/404.tar",
   691  		}
   692  
   693  		proxyURL   = tools.RandomProxyURL(t)
   694  		baseParams = tools.BaseAPIParams(proxyURL)
   695  	)
   696  
   697  	clearDownloadList(t)
   698  
   699  	// Create ais bucket
   700  	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)
   701  
   702  	id, err := api.DownloadMulti(baseParams, generateDownloadDesc(), bck, files)
   703  	tassert.CheckFatal(t, err)
   704  
   705  	// Wait to make sure both files were processed by downloader
   706  	waitForDownload(t, id, 30*time.Second)
   707  
   708  	resp, err := api.DownloadStatus(baseParams, id, false /*onlyActive*/)
   709  	tassert.CheckFatal(t, err)
   710  
   711  	tassert.Errorf(t, resp.Total == len(files), "expected %d objects, got %d", len(files), resp.Total)
   712  	tassert.Errorf(t, resp.FinishedCnt == 0, "expected 0 files to be finished")
   713  	tassert.Fatalf(
   714  		t, resp.ErrorCnt == len(files),
   715  		"expected 2 downloading errors, but got: %d errors: %v", len(resp.Errs), resp.Errs,
   716  	)
   717  
   718  	invalidAddressCausedError := resp.Errs[0].Name == "invalidURL" || resp.Errs[1].Name == "invalidURL"
   719  	notFoundFileCausedError := resp.Errs[0].Name == "notFoundFile" || resp.Errs[1].Name == "notFoundFile"
   720  
   721  	if !(invalidAddressCausedError && notFoundFileCausedError) {
   722  		t.Errorf("expected objects that cause errors to be (%s, %s), but got: (%s, %s)",
   723  			"invalidURL", "notFoundFile", resp.Errs[0].Name, resp.Errs[1].Name)
   724  	}
   725  
   726  	checkDownloadList(t)
   727  }
   728  
// TestDownloadSingleValidExternalAndInternalChecksum downloads two objects into
// a bucket with ValidateWarmGet enabled; a successful download implies both the
// external checksum check and the stored (internal) checksum were correct.
func TestDownloadSingleValidExternalAndInternalChecksum(t *testing.T) {
	var (
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)

		bck = cmn.Bck{
			Name:     testBucketName,
			Provider: apc.AIS,
		}
		objNameFirst  = "object-first"
		objNameSecond = "object-second"

		linkFirst  = "https://storage.googleapis.com/minikube/iso/minikube-v0.23.2.iso.sha256"
		linkSecond = "https://raw.githubusercontent.com/NVIDIA/aistore/main/README.md"

		expectedObjects = []string{objNameFirst, objNameSecond}
	)

	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)

	// Enable checksum validation on warm GET for this bucket.
	_, err := api.SetBucketProps(baseParams, bck, &cmn.BpropsToSet{
		Cksum: &cmn.CksumConfToSet{ValidateWarmGet: apc.Ptr(true)},
	})
	tassert.CheckFatal(t, err)

	id, err := api.DownloadSingle(baseParams, generateDownloadDesc(), bck, objNameFirst, linkFirst)
	tassert.CheckError(t, err)
	id2, err := api.DownloadSingle(baseParams, generateDownloadDesc(), bck, objNameSecond, linkSecond)
	tassert.CheckError(t, err)

	waitForDownload(t, id, 10*time.Second)
	waitForDownload(t, id2, 10*time.Second)

	// If the file was successfully downloaded, it means that the external checksum was correct. Also because of the
	// ValidateWarmGet property being set to True, if it was downloaded without errors then the internal checksum was
	// also set properly
	tools.EnsureObjectsExist(t, baseParams, bck, expectedObjects...)
}
   767  
// TestDownloadMultiValidExternalAndInternalChecksum is the multi-object (map)
// variant of the checksum-validation test above: with ValidateWarmGet enabled,
// error-free downloads imply both external and internal checksums were valid.
func TestDownloadMultiValidExternalAndInternalChecksum(t *testing.T) {
	var (
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)

		bck = cmn.Bck{
			Name:     testBucketName,
			Provider: apc.AIS,
		}
		objNameFirst  = "linkFirst"
		objNameSecond = "linkSecond"

		m = map[string]string{
			"linkFirst":  "https://storage.googleapis.com/minikube/iso/minikube-v0.23.2.iso.sha256",
			"linkSecond": "https://raw.githubusercontent.com/NVIDIA/aistore/main/README.md",
		}

		expectedObjects = []string{objNameFirst, objNameSecond}
	)

	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)

	// Enable checksum validation on warm GET for this bucket.
	_, err := api.SetBucketProps(baseParams, bck, &cmn.BpropsToSet{
		Cksum: &cmn.CksumConfToSet{ValidateWarmGet: apc.Ptr(true)},
	})
	tassert.CheckFatal(t, err)

	id, err := api.DownloadMulti(baseParams, generateDownloadDesc(), bck, m)
	tassert.CheckFatal(t, err)

	waitForDownload(t, id, 30*time.Second)
	checkDownloadedObjects(t, id, bck, expectedObjects)

	tools.EnsureObjectsExist(t, baseParams, bck, expectedObjects...)
}
   803  
// TestDownloadRangeValidExternalAndInternalChecksum is the range-template
// variant of the checksum-validation tests: with ValidateWarmGet enabled,
// error-free downloads imply both external and internal checksums were valid.
func TestDownloadRangeValidExternalAndInternalChecksum(t *testing.T) {
	tools.CheckSkip(t, &tools.SkipTestArgs{Long: true})

	var (
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)

		bck = cmn.Bck{
			Name:     testBucketName,
			Provider: apc.AIS,
		}

		// {21..25..2} expands to 21, 23, 25.
		template        = "storage.googleapis.com/minikube/iso/minikube-v0.{21..25..2}.0.iso.sha256"
		expectedObjects = []string{
			"minikube-v0.21.0.iso.sha256",
			"minikube-v0.23.0.iso.sha256",
			"minikube-v0.25.0.iso.sha256",
		}
	)

	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)

	// Enable checksum validation on warm GET for this bucket.
	_, err := api.SetBucketProps(baseParams, bck, &cmn.BpropsToSet{
		Cksum: &cmn.CksumConfToSet{ValidateWarmGet: apc.Ptr(true)},
	})
	tassert.CheckFatal(t, err)

	id, err := api.DownloadRange(baseParams, generateDownloadDesc(), bck, template)
	tassert.CheckFatal(t, err)

	waitForDownload(t, id, 10*time.Second)
	checkDownloadedObjects(t, id, bck, expectedObjects)

	tools.EnsureObjectsExist(t, baseParams, bck, expectedObjects...)
}
   839  
   840  func TestDownloadIntoNonexistentBucket(t *testing.T) {
   841  	var (
   842  		baseParams = tools.BaseAPIParams()
   843  		objName    = "object"
   844  		obj        = "storage.googleapis.com/nvdata-openimages/openimages-train-000001.tar"
   845  	)
   846  
   847  	bucket, err := tools.GenerateNonexistentBucketName("download", baseParams)
   848  	tassert.CheckFatal(t, err)
   849  
   850  	bck := cmn.Bck{
   851  		Name:     bucket,
   852  		Provider: apc.AIS,
   853  	}
   854  
   855  	_, err = api.DownloadSingle(baseParams, generateDownloadDesc(), bck, objName, obj)
   856  	tassert.CheckError(t, err)
   857  	api.DestroyBucket(baseParams, bck)
   858  }
   859  
// TestDownloadMountpath verifies downloader behavior when a target's
// mountpath is disabled mid-flight: a large range-download job is started
// and then aborted while one mountpath is disabled, after which a second
// (multi-object) job must still complete and deliver all objects.
// Requires a target with at least two mountpaths.
func TestDownloadMountpath(t *testing.T) {
	var (
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)
		bck        = cmn.Bck{
			Name:     trand.String(15),
			Provider: apc.AIS,
		}
		objsCnt  = 100
		template = "storage.googleapis.com/nvdata-openimages/openimages-train-{000000..000050}.tar"
		m        = make(map[string]string, objsCnt)
	)

	clearDownloadList(t)

	// Prepare objects to be downloaded. Multiple objects to make
	// sure that at least one of them gets into target with disabled mountpath.
	// All entries point to the same small link; only the object names differ.
	for i := range objsCnt {
		m[strconv.FormatInt(int64(i), 10)] = "https://raw.githubusercontent.com/NVIDIA/aistore/main/README.md"
	}

	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)

	id1, err := api.DownloadRange(baseParams, generateDownloadDesc(), bck, template)
	tassert.CheckFatal(t, err)
	tlog.Logf("Started very large download job %s (intended to be aborted)\n", id1)

	// Abort just in case something goes wrong.
	t.Cleanup(func() {
		abortDownload(t, id1)
	})

	tlog.Logln("Wait a while for downloaders to pick up...")
	time.Sleep(2 * time.Second)

	smap := tools.GetClusterMap(t, proxyURL)
	selectedTarget, _ := smap.GetRandTarget()

	mpathList, err := api.GetMountpaths(baseParams, selectedTarget)
	tassert.CheckFatal(t, err)
	tassert.Fatalf(t, len(mpathList.Available) >= 2, "%s requires 2 or more mountpaths", t.Name())

	// Disable a randomly chosen mountpath without triggering resilvering.
	mpathID := cos.NowRand().Intn(len(mpathList.Available))
	removeMpath := mpathList.Available[mpathID]
	tlog.Logf("Disabling mountpath %q at %s\n", removeMpath, selectedTarget.StringEx())
	err = api.DisableMountpath(baseParams, selectedTarget, removeMpath, true /*dont-resil*/)
	tassert.CheckFatal(t, err)

	// Re-enable the mountpath and wait for resilvering before the bucket
	// cleanup (registered above) kicks in.
	defer func() {
		tlog.Logf("Enabling mountpath %q at %s\n", removeMpath, selectedTarget.StringEx())
		err = api.EnableMountpath(baseParams, selectedTarget, removeMpath)
		tassert.CheckFatal(t, err)

		tools.WaitForResilvering(t, baseParams, selectedTarget)
		ensureNumMountpaths(t, selectedTarget, mpathList)
	}()

	// Downloader finished on the target `selectedTarget`, safe to abort the rest.
	time.Sleep(time.Second)
	tlog.Logf("Aborting download job %s\n", id1)
	abortDownload(t, id1)

	// The aborted job must not have left any (complete) objects behind.
	tlog.Logf("Listing %s\n", bck)
	objs, err := tools.ListObjectNames(proxyURL, bck, "", 0, true /*cached*/)
	tassert.CheckError(t, err)
	tassert.Fatalf(t, len(objs) == 0, "objects should not have been downloaded, download should have been aborted\n")

	// With one mountpath still disabled, a fresh multi-download must succeed in full.
	id2, err := api.DownloadMulti(baseParams, generateDownloadDesc(), bck, m)
	tassert.CheckFatal(t, err)
	tlog.Logf("Started download job %s, waiting for it to finish\n", id2)

	waitForDownload(t, id2, time.Minute)
	objs, err = tools.ListObjectNames(proxyURL, bck, "", 0, true /*cached*/)
	tassert.CheckError(t, err)
	tassert.Fatalf(t, len(objs) == objsCnt, "Expected %d objects to be present, got: %d", objsCnt, len(objs))
}
   936  
   937  func TestDownloadOverrideObject(t *testing.T) {
   938  	var (
   939  		proxyURL   = tools.RandomProxyURL(t)
   940  		baseParams = tools.BaseAPIParams(proxyURL)
   941  		bck        = cmn.Bck{
   942  			Name:     trand.String(10),
   943  			Provider: apc.AIS,
   944  		}
   945  		p = bck.DefaultProps(initialClusterConfig)
   946  
   947  		objName = trand.String(10)
   948  		link    = "https://storage.googleapis.com/minikube/iso/minikube-v0.23.2.iso.sha256"
   949  
   950  		expectedSize int64 = 65
   951  	)
   952  
   953  	clearDownloadList(t)
   954  
   955  	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)
   956  
   957  	downloadObject(t, bck, objName, link, false /*expectedSkipped*/, true /*bucket exists*/)
   958  	oldProps := verifyProps(t, bck, objName, expectedSize, "1")
   959  
   960  	// Disallow PUT (TODO: use apc.AceObjUpdate instead when/if supported)
   961  	aattrs := apc.AccessAll &^ apc.AcePUT
   962  	_, err := api.SetBucketProps(baseParams, bck, &cmn.BpropsToSet{
   963  		Access: apc.Ptr(aattrs),
   964  	})
   965  	tassert.CheckFatal(t, err)
   966  
   967  	tlog.Logln("Trying to update the object (expecting to fail)")
   968  	r, _ := readers.NewRand(10, p.Cksum.Type)
   969  	_, err = api.PutObject(&api.PutArgs{
   970  		BaseParams: baseParams,
   971  		Bck:        bck,
   972  		ObjName:    objName,
   973  		Cksum:      r.Cksum(),
   974  		Reader:     r,
   975  	})
   976  	tassert.Fatalf(t, err != nil, "expected: err!=nil, got: nil")
   977  	verifyProps(t, bck, objName, expectedSize, "1")
   978  
   979  	// Allow PUT back again (TODO: ditto)
   980  	tlog.Logln("Allow updates back again, and download " + link)
   981  	aattrs = apc.AccessAll
   982  	_, err = api.SetBucketProps(baseParams, bck, &cmn.BpropsToSet{
   983  		Access: apc.Ptr(aattrs),
   984  	})
   985  	tassert.CheckFatal(t, err)
   986  
   987  	downloadObject(t, bck, objName, link, true /*expectedSkipped*/, true /*bucket exists*/)
   988  	newProps := verifyProps(t, bck, objName, expectedSize, "1")
   989  	tassert.Errorf(
   990  		t, oldProps.Atime == newProps.Atime,
   991  		"atime match (%v != %v)", oldProps.Atime, newProps.Atime,
   992  	)
   993  }
   994  
   995  func TestDownloadOverrideObjectWeb(t *testing.T) {
   996  	var (
   997  		proxyURL   = tools.RandomProxyURL(t)
   998  		baseParams = tools.BaseAPIParams(proxyURL)
   999  		bck        = cmn.Bck{
  1000  			Name:     trand.String(10),
  1001  			Provider: apc.AIS,
  1002  		}
  1003  		p = bck.DefaultProps(initialClusterConfig)
  1004  
  1005  		objName = trand.String(10)
  1006  		link    = "https://raw.githubusercontent.com/NVIDIA/aistore/main/LICENSE"
  1007  
  1008  		expectedSize int64 = 1075
  1009  		newSize      int64 = 10
  1010  	)
  1011  
  1012  	clearDownloadList(t)
  1013  
  1014  	downloadObject(t, bck, objName, link, false /*expectedSkipped*/, false /*destination bucket exists*/)
  1015  
  1016  	t.Cleanup(func() {
  1017  		tools.DestroyBucket(t, proxyURL, bck)
  1018  	})
  1019  
  1020  	oldProps := verifyProps(t, bck, objName, expectedSize, "1")
  1021  
  1022  	// Update the file
  1023  	r, _ := readers.NewRand(newSize, p.Cksum.Type)
  1024  	_, err := api.PutObject(&api.PutArgs{
  1025  		BaseParams: baseParams,
  1026  		Bck:        bck,
  1027  		ObjName:    objName,
  1028  		Cksum:      r.Cksum(),
  1029  		Reader:     r,
  1030  	})
  1031  	tassert.Fatalf(t, err == nil, "expected: err nil, got: %v", err)
  1032  	verifyProps(t, bck, objName, newSize, "2")
  1033  
  1034  	downloadObject(t, bck, objName, link, false /*expectedSkipped*/, true /*bucket exists*/)
  1035  	newProps := verifyProps(t, bck, objName, expectedSize, "3")
  1036  	tassert.Errorf(
  1037  		t, oldProps.Atime != newProps.Atime,
  1038  		"atime match (%v == %v)", oldProps.Atime, newProps.Atime,
  1039  	)
  1040  }
  1041  
  1042  func TestDownloadOverrideObjectRemote(t *testing.T) {
  1043  	var (
  1044  		proxyURL   = tools.RandomProxyURL(t)
  1045  		baseParams = tools.BaseAPIParams(proxyURL)
  1046  		bck        = cmn.Bck{
  1047  			Name:     trand.String(10),
  1048  			Provider: apc.AIS,
  1049  		}
  1050  		dlBody = dload.BackendBody{
  1051  			Base: dload.Base{Bck: bck},
  1052  		}
  1053  		m = &ioContext{
  1054  			t:                   t,
  1055  			num:                 10,
  1056  			bck:                 cliBck,
  1057  			deleteRemoteBckObjs: true,
  1058  		}
  1059  	)
  1060  
  1061  	tools.CheckSkip(t, &tools.SkipTestArgs{CloudBck: true, Bck: m.bck})
  1062  
  1063  	m.init(true /*cleanup*/)
  1064  	m.remotePuts(false /*evict*/)
  1065  
  1066  	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)
  1067  	tools.SetBackendBck(t, baseParams, bck, m.bck)
  1068  
  1069  	downloadObjectRemote(t, dlBody, m.num, 0)
  1070  	m.remotePuts(false /*evict*/)
  1071  	downloadObjectRemote(t, dlBody, m.num, 0)
  1072  }
  1073  
  1074  func TestDownloadSkipObject(t *testing.T) {
  1075  	var (
  1076  		proxyURL = tools.RandomProxyURL(t)
  1077  		bck      = cmn.Bck{
  1078  			Name:     trand.String(10),
  1079  			Provider: apc.AIS,
  1080  		}
  1081  
  1082  		objName = trand.String(10)
  1083  		link    = "https://storage.googleapis.com/minikube/iso/minikube-v0.23.2.iso.sha256"
  1084  
  1085  		expectedSize    int64 = 65
  1086  		expectedVersion       = "1"
  1087  	)
  1088  
  1089  	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)
  1090  
  1091  	downloadObject(t, bck, objName, link, false /*expectedSkipped*/, true /*bucket exists*/)
  1092  	oldProps := verifyProps(t, bck, objName, expectedSize, expectedVersion)
  1093  
  1094  	downloadObject(t, bck, objName, link, true /*expectedSkipped*/, true /*bucket exists*/)
  1095  	newProps := verifyProps(t, bck, objName, expectedSize, expectedVersion)
  1096  	tassert.Errorf(
  1097  		t, oldProps.Atime == newProps.Atime,
  1098  		"atime mismatch (%v vs %v)", oldProps.Atime, newProps.Atime,
  1099  	)
  1100  }
  1101  
  1102  func TestDownloadSkipObjectRemote(t *testing.T) {
  1103  	var (
  1104  		proxyURL   = tools.RandomProxyURL(t)
  1105  		baseParams = tools.BaseAPIParams(proxyURL)
  1106  		bck        = cmn.Bck{
  1107  			Name:     trand.String(10),
  1108  			Provider: apc.AIS,
  1109  		}
  1110  		dlBody = dload.BackendBody{
  1111  			Base: dload.Base{Bck: bck},
  1112  		}
  1113  		m = &ioContext{
  1114  			t:                   t,
  1115  			num:                 10,
  1116  			bck:                 cliBck,
  1117  			deleteRemoteBckObjs: true,
  1118  		}
  1119  	)
  1120  
  1121  	tools.CheckSkip(t, &tools.SkipTestArgs{CloudBck: true, Bck: m.bck})
  1122  
  1123  	m.init(true /*cleanup*/)
  1124  	m.remotePuts(false /*evict*/)
  1125  
  1126  	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)
  1127  	tools.SetBackendBck(t, baseParams, bck, m.bck)
  1128  
  1129  	downloadObjectRemote(t, dlBody, m.num, 0)
  1130  	downloadObjectRemote(t, dlBody, m.num, m.num)
  1131  
  1132  	// Put some more remote objects (we expect that only the new ones will be downloaded)
  1133  	m.num += 10
  1134  	m.remoteRefill()
  1135  
  1136  	downloadObjectRemote(t, dlBody, m.num, m.num-10)
  1137  }
  1138  
// TestDownloadSync exercises the downloader's Sync mode against a remote
// backend bucket: re-syncing after deletions, refills, and overrides, and
// finally syncing with prefix/suffix filters that match nothing.
// The expected downloaded/skipped counts passed to downloadObjectRemote
// track each step's mutation of the remote bucket.
func TestDownloadSync(t *testing.T) {
	var (
		proxyURL   = tools.RandomProxyURL(t)
		baseParams = tools.BaseAPIParams(proxyURL)
		bck        = cmn.Bck{
			Name:     trand.String(10),
			Provider: apc.AIS,
		}
		dlBody = dload.BackendBody{
			Base: dload.Base{Bck: bck},
		}
		m = &ioContext{
			t:                   t,
			num:                 10,
			bck:                 cliBck,
			deleteRemoteBckObjs: true,
		}
		objsToDelete = 4
	)

	tools.CheckSkip(t, &tools.SkipTestArgs{CloudBck: true, Bck: m.bck})

	m.init(true /*cleanup*/)
	m.remotePuts(false /*evict*/)

	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)
	tools.SetBackendBck(t, baseParams, bck, m.bck)

	tlog.Logln("1. initial sync of remote bucket...")
	dlBody.Sync = true
	downloadObjectRemote(t, dlBody, m.num, 0)
	downloadObjectRemote(t, dlBody, m.num, m.num)

	m.del(objsToDelete)

	// Check that only the deleted objects are replaced (re-downloaded);
	// everything else is skipped.
	tlog.Logln("2. re-syncing remote bucket...")
	downloadObjectRemote(t, dlBody, m.num, m.num-objsToDelete)
	downloadObjectRemote(t, dlBody, m.num-objsToDelete, m.num-objsToDelete)

	tlog.Logln("3. syncing from remote bucket (to \"refill\" removed remote objects)...")
	m.remoteRefill()

	// Check that new objects are correctly downloaded.
	tlog.Logln("4. re-syncing remote bucket...")
	downloadObjectRemote(t, dlBody, m.num, m.num-objsToDelete)
	downloadObjectRemote(t, dlBody, m.num, m.num)
	// Without Sync the expected counts must be unaffected.
	dlBody.Sync = false
	downloadObjectRemote(t, dlBody, m.num, m.num)

	tlog.Logln("5. overridding the objects and deleting some of them...")
	m.remotePuts(false /*evict*/, true /*override*/)
	m.del(objsToDelete)

	// Check that all objects have been replaced.
	tlog.Logln("6. re-syncing remote bucket...")
	dlBody.Sync = true
	downloadObjectRemote(t, dlBody, m.num, 0)
	downloadObjectRemote(t, dlBody, m.num-objsToDelete, m.num-objsToDelete)

	// Neither filter matches any object name, so both syncs are no-ops.
	tlog.Logln("7. check that syncing with prefix and suffix works")
	dlBody.Prefix = "someprefix-"
	dlBody.Suffix = ""
	downloadObjectRemote(t, dlBody, 0, 0)
	dlBody.Prefix = ""
	dlBody.Suffix = "somesuffix-"
	downloadObjectRemote(t, dlBody, 0, 0)
}
  1207  
  1208  func TestDownloadJobLimitConnections(t *testing.T) {
  1209  	tools.CheckSkip(t, &tools.SkipTestArgs{Long: true})
  1210  
  1211  	const (
  1212  		limitConnection = 2
  1213  		template        = "https://storage.googleapis.com/minikube/iso/minikube-v0.{18..35}.{0..1}.iso"
  1214  		maxWait         = 13 * time.Second
  1215  	)
  1216  
  1217  	var (
  1218  		proxyURL   = tools.RandomProxyURL(t)
  1219  		baseParams = tools.BaseAPIParams(proxyURL)
  1220  		bck        = cmn.Bck{
  1221  			Name:     trand.String(10),
  1222  			Provider: apc.AIS,
  1223  		}
  1224  		totalWait                 time.Duration
  1225  		xactID                    string
  1226  		minConnectionLimitReached bool
  1227  	)
  1228  
  1229  	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)
  1230  
  1231  	smap, err := api.GetClusterMap(baseParams)
  1232  	tassert.CheckFatal(t, err)
  1233  	targetCnt := smap.CountActiveTs()
  1234  
  1235  	id, err := api.DownloadWithParam(baseParams, dload.TypeRange, dload.RangeBody{
  1236  		Base: dload.Base{
  1237  			Bck:         bck,
  1238  			Description: generateDownloadDesc(),
  1239  			Limits: dload.Limits{
  1240  				Connections:  limitConnection,
  1241  				BytesPerHour: 200 * cos.MiB,
  1242  			},
  1243  		},
  1244  		Template: template,
  1245  	})
  1246  	tassert.CheckFatal(t, err)
  1247  	t.Cleanup(func() {
  1248  		abortDownload(t, id)
  1249  	})
  1250  
  1251  	time.Sleep(2 * time.Second)
  1252  	tlog.Logf("download %q: checking num current tasks\n", id)
  1253  	for totalWait < maxWait {
  1254  		status, err := api.DownloadStatus(baseParams, id, false /*onlyActive*/)
  1255  		tassert.CheckFatal(t, err)
  1256  
  1257  		if xactID == "" {
  1258  			xactID = status.Job.XactID
  1259  		} else if status.Job.XactID != "" {
  1260  			tassert.Errorf(t, xactID == status.Job.XactID, "xactID %q vs %q", xactID, status.Job.XactID)
  1261  		}
  1262  
  1263  		// Expect that we never exceed the limit of connections per target.
  1264  		tassert.Errorf(
  1265  			t, len(status.CurrentTasks) <= limitConnection*targetCnt,
  1266  			"number of tasks mismatch (expected as most: %d, got: %d)",
  1267  			2*targetCnt, len(status.CurrentTasks),
  1268  		)
  1269  
  1270  		// Expect that at some point in time there are more connections than targets.
  1271  		if len(status.CurrentTasks) > targetCnt {
  1272  			minConnectionLimitReached = true
  1273  			tlog.Logln("reached the expected minimum num tasks - aborting job " + id)
  1274  			break
  1275  		}
  1276  
  1277  		time.Sleep(time.Second)
  1278  		totalWait += time.Second
  1279  		if totalWait > maxWait/2 {
  1280  			var errs string
  1281  			if l := len(status.Errs); l > 0 {
  1282  				errs = fmt.Sprintf(", errs=%v", status.Errs[:min(2, l)])
  1283  			}
  1284  			tlog.Logf("download %q: num-tasks %d <= %d num-targets, job [%s]%s\n",
  1285  				id, len(status.CurrentTasks), targetCnt, status.Job.String(), errs)
  1286  		}
  1287  	}
  1288  
  1289  	if minConnectionLimitReached {
  1290  		return
  1291  	}
  1292  	if status, err := api.DownloadStatus(baseParams, id, false /*onlyActive*/); err == nil {
  1293  		if len(status.Errs) > 0 {
  1294  			if strings.Contains(status.Errs[0].Err, "does not exist") {
  1295  				return
  1296  			}
  1297  		}
  1298  	}
  1299  
  1300  	tassert.Errorf(t, minConnectionLimitReached, "expected more running tasks (conn-s) than the number of targets")
  1301  }
  1302  
  1303  func TestDownloadJobConcurrency(t *testing.T) {
  1304  	tools.CheckSkip(t, &tools.SkipTestArgs{Long: true})
  1305  
  1306  	var (
  1307  		proxyURL   = tools.RandomProxyURL(t)
  1308  		baseParams = tools.BaseAPIParams(proxyURL)
  1309  		bck        = cmn.Bck{
  1310  			Name:     trand.String(10),
  1311  			Provider: apc.AIS,
  1312  		}
  1313  
  1314  		template = "https://storage.googleapis.com/minikube/iso/minikube-v0.{18..35}.0.iso"
  1315  	)
  1316  
  1317  	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)
  1318  
  1319  	smap, err := api.GetClusterMap(baseParams)
  1320  	tassert.CheckFatal(t, err)
  1321  
  1322  	tlog.Logln("Starting first download...")
  1323  
  1324  	id1, err := api.DownloadWithParam(baseParams, dload.TypeRange, dload.RangeBody{
  1325  		Base: dload.Base{
  1326  			Bck:         bck,
  1327  			Description: generateDownloadDesc(),
  1328  			Limits: dload.Limits{
  1329  				Connections:  1,
  1330  				BytesPerHour: 100 * cos.MiB,
  1331  			},
  1332  		},
  1333  		Template: template,
  1334  	})
  1335  	tassert.CheckFatal(t, err)
  1336  	t.Cleanup(func() {
  1337  		abortDownload(t, id1)
  1338  	})
  1339  
  1340  	tlog.Logln("Starting second download...")
  1341  
  1342  	id2, err := api.DownloadWithParam(baseParams, dload.TypeRange, dload.RangeBody{
  1343  		Base: dload.Base{
  1344  			Bck:         bck,
  1345  			Description: generateDownloadDesc(),
  1346  			Limits: dload.Limits{
  1347  				BytesPerHour: 100 * cos.MiB,
  1348  			},
  1349  		},
  1350  		Template: template,
  1351  	})
  1352  	tassert.CheckFatal(t, err)
  1353  	t.Cleanup(func() {
  1354  		abortDownload(t, id2)
  1355  	})
  1356  
  1357  	tlog.Logln("Waiting for checks...")
  1358  	var (
  1359  		concurrentJobs bool
  1360  		resp1, resp2   *dload.StatusResp
  1361  	)
  1362  	for range 10 {
  1363  		resp1, err = api.DownloadStatus(baseParams, id1, false /*onlyActive*/)
  1364  		tassert.CheckFatal(t, err)
  1365  
  1366  		// Expect that number of tasks never exceeds the defined limit.
  1367  		targetCnt := smap.CountActiveTs()
  1368  		tassert.Errorf(
  1369  			t, len(resp1.CurrentTasks) <= targetCnt,
  1370  			"number of tasks mismatch (expected at most: %d, got: %d)",
  1371  			targetCnt, len(resp1.CurrentTasks),
  1372  		)
  1373  
  1374  		// Expect that at some point the second job will be run concurrently.
  1375  		resp2, err = api.DownloadStatus(baseParams, id2, false /*onlyActive*/)
  1376  		tassert.CheckFatal(t, err)
  1377  
  1378  		if len(resp2.CurrentTasks) > 0 && len(resp1.CurrentTasks) > 0 {
  1379  			concurrentJobs = true
  1380  		}
  1381  		time.Sleep(time.Second)
  1382  	}
  1383  
  1384  	tassert.Errorf(t, concurrentJobs, "expected jobs to run concurrently")
  1385  	tlog.Logln("Done waiting")
  1386  }
  1387  
  1388  // NOTE: Test may fail if the network is SUPER slow!!
  1389  func TestDownloadJobBytesThrottling(t *testing.T) {
  1390  	tools.CheckSkip(t, &tools.SkipTestArgs{Long: true})
  1391  
  1392  	const (
  1393  		link = "https://storage.googleapis.com/minikube/iso/minikube-v0.35.0.iso"
  1394  
  1395  		// Bytes per hour limit.
  1396  		softLimit = 5 * cos.KiB
  1397  		// Downloader could potentially download a little bit more but should
  1398  		// never exceed this.
  1399  		hardLimit = 7 * cos.KiB
  1400  	)
  1401  
  1402  	var (
  1403  		proxyURL   = tools.RandomProxyURL(t)
  1404  		baseParams = tools.BaseAPIParams(proxyURL)
  1405  		bck        = cmn.Bck{
  1406  			Name:     trand.String(10),
  1407  			Provider: apc.AIS,
  1408  		}
  1409  	)
  1410  
  1411  	tools.CreateBucket(t, proxyURL, bck, nil, true /*cleanup*/)
  1412  
  1413  	id, err := api.DownloadWithParam(baseParams, dload.TypeSingle, dload.SingleBody{
  1414  		Base: dload.Base{
  1415  			Bck:         bck,
  1416  			Description: generateDownloadDesc(),
  1417  			Limits: dload.Limits{
  1418  				BytesPerHour: softLimit,
  1419  			},
  1420  		},
  1421  		SingleObj: dload.SingleObj{
  1422  			ObjName: "object",
  1423  			Link:    link,
  1424  		},
  1425  	})
  1426  	tassert.CheckFatal(t, err)
  1427  	t.Cleanup(func() {
  1428  		abortDownload(t, id)
  1429  	})
  1430  
  1431  	time.Sleep(10 * time.Second) // wait for downloader to download `softLimit` bytes
  1432  
  1433  	resp, err := api.DownloadStatus(baseParams, id, false /*onlyActive*/)
  1434  	tassert.CheckFatal(t, err)
  1435  
  1436  	tassert.Fatalf(t, len(resp.CurrentTasks) == 1, "expected one running task")
  1437  	tassert.Errorf(
  1438  		t, resp.CurrentTasks[0].Downloaded < hardLimit,
  1439  		"no more than %d should be downloaded, got: %d",
  1440  		hardLimit, resp.CurrentTasks[0].Downloaded,
  1441  	)
  1442  }