github.com/htcondor/osdf-client/v6@v6.13.0-rc1.0.20231009141709-766e7b4d1dc8/handle_http.go

     1  package stashcp
     2  
     3  import (
     4  	"context"
     5  	"errors"
     6  	"fmt"
     7  	"io"
     8  	"net"
     9  	"net/http"
    10  	"net/http/httputil"
    11  	"net/url"
    12  	"os"
    13  	"path"
    14  	"regexp"
    15  	"strconv"
    16  	"strings"
    17  	"sync"
    18  	"sync/atomic"
    19  	"syscall"
    20  	"time"
    21  
    22  	grab "github.com/cavaliercoder/grab"
    23  	log "github.com/sirupsen/logrus"
    24  	"github.com/studio-b12/gowebdav"
    25  	"github.com/vbauerster/mpb/v7"
    26  	"github.com/vbauerster/mpb/v7/decor"
    27  
    28  	namespaces "github.com/htcondor/osdf-client/v6/namespaces"
    29  )
    30  
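         // env_prefixes lists the environment variable prefixes (OSG, OSDF) that are combined
         // with suffixes such as _DISABLE_HTTP_PROXY and _DISABLE_PROXY_FALLBACK to override
         // proxy behavior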
    31  var env_prefixes = [...]string{"OSG", "OSDF"}
    32  
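         // p is the shared progress-bar container used to render per-file progress bars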
    33  var p = mpb.New()
    34  
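         // StoppedTransferError is returned when a transfer stops making forward progress for too long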
    35  type StoppedTransferError struct {
    36  	Err 	string
    37  }
    38  
    39  func (e *StoppedTransferError) Error() string {
    40  	return e.Err
    41  }
    42  
    43  
     44  // SlowTransferError is an error that is returned when a transfer's speed stays below the configured minimum for too long
    45  type SlowTransferError struct {
    46  	BytesTransferred int64
    47  	BytesPerSecond   int64
    48  	BytesTotal       int64
    49  	Duration         time.Duration
    50  }
    51  
    52  func (e *SlowTransferError) Error() string {
    53  	return "cancelled transfer, too slow.  Detected speed: " +
    54  		ByteCountSI(e.BytesPerSecond) +
    55  		"/s, total transferred: " +
    56  		ByteCountSI(e.BytesTransferred) +
    57  		", total transfer time: " +
    58  		e.Duration.String()
    59  }
    60  
    61  func (e *SlowTransferError) Is(target error) bool {
    62  	_, ok := target.(*SlowTransferError)
    63  	return ok
    64  }
    65  
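         // FileDownloadError wraps an underlying error together with a human-readable description
         // of the failed download attempt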
    66  type FileDownloadError struct {
    67  	Text string
    68  	Err  error
    69  }
    70  
    71  func (e *FileDownloadError) Error() string {
    72  	return e.Text
    73  }
    74  
    75  func (e *FileDownloadError) Unwrap() error {
    76  	return e.Err
    77  }
    78  
     79  // IsProxyEnabled determines whether we can use the site HTTP proxy
    80  func IsProxyEnabled() bool {
    81  	if _, isSet := os.LookupEnv("http_proxy"); !isSet {
    82  		return false
    83  	}
    84  	for _, prefix := range env_prefixes {
    85  		if _, isSet := os.LookupEnv(prefix + "_DISABLE_HTTP_PROXY"); isSet {
    86  			return false
    87  		}
    88  	}
    89  	return true
    90  }
    91  
     92  // CanDisableProxy determines whether we are allowed to fall back to a direct, non-proxied connection
    93  func CanDisableProxy() bool {
    94  	for _, prefix := range env_prefixes {
    95  		if _, isSet := os.LookupEnv(prefix + "_DISABLE_PROXY_FALLBACK"); isSet {
    96  			return false
    97  		}
    98  	}
    99  	return true
   100  }
   101  
   102  // ConnectionSetupError is an error that is returned when a connection to the remote server fails
   103  type ConnectionSetupError struct {
   104  	URL string
   105  	Err error
   106  }
   107  
   108  func (e *ConnectionSetupError) Error() string {
   109  	if e.Err != nil {
   110  		if len(e.URL) > 0 {
   111  			return "failed connection setup to " + e.URL + ": " + e.Err.Error()
   112  		} else {
   113  			return "failed connection setup: " + e.Err.Error()
   114  		}
   115  	} else {
   116  		return "Connection to remote server failed"
   117  	}
   118  
   119  }
   120  
   121  func (e *ConnectionSetupError) Unwrap() error {
   122  	return e.Err
   123  }
   124  
   125  func (e *ConnectionSetupError) Is(target error) bool {
   126  	_, ok := target.(*ConnectionSetupError)
   127  	return ok
   128  }
   129  
    130  // HasPort tests whether the host string includes a port (e.g. "cache.example.org:8000")
   131  func HasPort(host string) bool {
   132  	var checkPort = regexp.MustCompile("^.*:[0-9]+$")
   133  	return checkPort.MatchString(host)
   134  }
   135  
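         // TransferDetails describes one way of contacting a cache: the exact URL to use and
         // whether the site HTTP proxy should be used for the connection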
   136  type TransferDetails struct {
    137  	// Url is the url.URL of the cache, including its port
   138  	Url url.URL
   139  
   140  	// Proxy specifies if a proxy should be used
   141  	Proxy bool
   142  }
   143  
    144  // NewTransferDetails creates one or more TransferDetails entries for the given cache endpoint
   145  func NewTransferDetails(cache namespaces.Cache, https bool) []TransferDetails {
   146  	details := make([]TransferDetails, 0)
   147  	var cacheEndpoint string
   148  	if https {
   149  		cacheEndpoint = cache.AuthEndpoint
   150  	} else {
   151  		cacheEndpoint = cache.Endpoint
   152  	}
   153  
   154  	// Form the URL
   155  	cacheURL, err := url.Parse(cacheEndpoint)
   156  	if err != nil {
   157  		log.Errorln("Failed to parse cache:", cache, "error:", err)
   158  		return nil
   159  	}
   160  	if cacheURL.Host == "" {
   161  		// Assume the cache is just a hostname
   162  		cacheURL.Host = cacheEndpoint
   163  		cacheURL.Path = ""
   164  		cacheURL.Scheme = ""
   165  		cacheURL.Opaque = ""
   166  	}
   167  	log.Debugf("Parsed Cache: %s\n", cacheURL.String())
   168  	if https {
   169  		cacheURL.Scheme = "https"
   170  		if !HasPort(cacheURL.Host) {
   171  			// Add port 8444 and 8443
   172  			cacheURL.Host += ":8444"
   173  			details = append(details, TransferDetails{
   174  				Url:   *cacheURL,
   175  				Proxy: false,
   176  			})
   177  			// Strip the port off and add 8443
   178  			cacheURL.Host = cacheURL.Host[:len(cacheURL.Host)-5] + ":8443"
   179  		}
   180  		// Whether port is specified or not, add a transfer without proxy
   181  		details = append(details, TransferDetails{
   182  			Url:   *cacheURL,
   183  			Proxy: false,
   184  		})
   185  	} else {
   186  		cacheURL.Scheme = "http"
   187  		if !HasPort(cacheURL.Host) {
   188  			cacheURL.Host += ":8000"
   189  		}
   190  		isProxyEnabled := IsProxyEnabled()
   191  		details = append(details, TransferDetails{
   192  			Url:   *cacheURL,
   193  			Proxy: isProxyEnabled,
   194  		})
   195  		if isProxyEnabled && CanDisableProxy() {
   196  			details = append(details, TransferDetails{
   197  				Url:   *cacheURL,
   198  				Proxy: false,
   199  			})
   200  		}
   201  	}
   202  
   203  	return details
   204  }
   205  
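         // TransferResults reports the outcome of a single file transfer: the number of bytes
         // downloaded and any error encountered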
   206  type TransferResults struct {
   207  	Error      error
   208  	Downloaded int64
   209  }
   210  
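         // CacheInterface is an empty interface; values passed to it are expected to be either
         // a namespaces.Cache or a namespaces.DirectorCache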
   211  type CacheInterface interface{}
   212  
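         // GenerateTransferDetailsUsingCache builds the TransferDetails for the given cache,
         // dispatching on whether it is a director-provided cache or a namespace cache.
         // It returns nil if the concrete type is not recognized.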
   213  func GenerateTransferDetailsUsingCache(cache CacheInterface, needsToken bool) []TransferDetails {
   214  	if directorCache, ok := cache.(namespaces.DirectorCache); ok {
   215  		return NewTransferDetailsUsingDirector(directorCache, needsToken)
   216  	} else if cache, ok := cache.(namespaces.Cache); ok {
   217  		return NewTransferDetails(cache, needsToken)
   218  	}
   219  	return nil
   220  }
   221  
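         // download_http downloads a single object (or, when recursive is set, a whole directory
         // tree) from the ordered list of caches, fanning the work out to a pool of five workers.
         // It returns the total number of bytes downloaded and the last error seen, if any.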
   222  func download_http(source string, destination string, payload *payloadStruct, namespace namespaces.Namespace, recursive bool, tokenName string, OSDFDirectorUrl string) (bytesTransferred int64, err error) {
   223  
   224  	// First, create a handler for any panics that occur
   225  	defer func() {
   226  		if r := recover(); r != nil {
   227  			log.Errorln("Panic occurred in download_http:", r)
   228  			ret := fmt.Sprintf("Unrecoverable error (panic) occurred in download_http: %v", r)
   229  			err = errors.New(ret)
   230  			bytesTransferred = 0
   231  
   232  			// Attempt to add the panic to the error accumulator
   233  			AddError(errors.New(ret))
   234  		}
   235  	}()
   236  
   237  	// Generate the downloadUrl
   238  	var token string
   239  	if namespace.UseTokenOnRead {
   240  		var err error
   241  		sourceUrl := url.URL{Path: source}
   242  		token, err = getToken(&sourceUrl, namespace, false, tokenName)
   243  		if err != nil {
   244  			log.Errorln("Failed to get token though required to read from this namespace:", err)
   245  			return 0, err
   246  		}
   247  	}
   248  
   249  	// Check the env var "USE_OSDF_DIRECTOR" and decide if ordered caches should come from director
   250  	var transfers []TransferDetails
   251  	var files []string
   252  	var closestNamespaceCaches []CacheInterface
   253  	if OSDFDirectorUrl != "" {
   254  		log.Debugln("Using OSDF Director at ", OSDFDirectorUrl)
   255  		closestNamespaceCaches = make([]CacheInterface, len(namespace.SortedDirectorCaches))
   256  		for i, v := range namespace.SortedDirectorCaches {
   257  			closestNamespaceCaches[i] = v
   258  		}
   259  	} else {
   260  		tmpCaches, err := GetCachesFromNamespace(namespace)
   261  		if err != nil {
   262  			log.Errorln("Failed to get namespaced caches (treated as non-fatal):", err)
   263  		}
   264  
   265  		closestNamespaceCaches = make([]CacheInterface, len(tmpCaches))
   266  		for i, v := range tmpCaches {
   267  			closestNamespaceCaches[i] = v
   268  		}
   269  	}
   270  	log.Debugln("Matched caches:", closestNamespaceCaches)
   271  
   272  	// Make sure we only try as many caches as we have
   273  	cachesToTry := CachesToTry
   274  	if cachesToTry > len(closestNamespaceCaches) {
   275  		cachesToTry = len(closestNamespaceCaches)
   276  	}
   277  	log.Debugln("Trying the caches:", closestNamespaceCaches[:cachesToTry])
   278  	downloadUrl := url.URL{Path: source}
   279  
   280  	if recursive {
   281  		var err error
   282  		files, err = walkDavDir(&downloadUrl, token, namespace)
   283  		if err != nil {
   284  			log.Errorln("Error from walkDavDir", err)
   285  			return 0, err
   286  		}
   287  	} else {
   288  		files = append(files, source)
   289  	}
   290  
   291  	for _, cache := range closestNamespaceCaches[:cachesToTry] {
   292  		// Parse the cache URL
   293  		log.Debugln("Cache:", cache)
   294  		transfers = append(transfers, GenerateTransferDetailsUsingCache(cache, namespace.ReadHTTPS || namespace.UseTokenOnRead)...)
   295  	}
   296  
   297  	if len(transfers) > 0 {
   298  		log.Debugln("Transfers:", transfers[0].Url.Opaque)
   299  	} else {
   300  		log.Debugln("No transfers possible as no caches are found")
   301  		return 0, errors.New("No transfers possible as no caches are found")
   302  	}
   303  	// Create the wait group and the transfer files
   304  	var wg sync.WaitGroup
   305  
   306  	workChan := make(chan string)
   307  	results := make(chan TransferResults, len(files))
   308  	//tf := TransferFiles{files: files}
   309  
   310  	// Start the workers
   311  	for i := 1; i <= 5; i++ {
   312  		wg.Add(1)
   313  		go startDownloadWorker(source, destination, token, transfers, &wg, workChan, results)
   314  	}
   315  
   316  	// For each file, send it to the worker
   317  	for _, file := range files {
   318  		workChan <- file
   319  	}
   320  	close(workChan)
   321  
   322  	// Wait for all the transfers to complete
   323  	wg.Wait()
   324  
   325  	var downloaded int64
   326  	var downloadError error = nil
   327  	// Every transfer should send a TransferResults to the results channel
   328  	for i := 0; i < len(files); i++ {
   329  		select {
   330  		case result := <-results:
   331  			downloaded += result.Downloaded
   332  			if result.Error != nil {
   333  				downloadError = result.Error
   334  			}
   335  		default:
   336  			// Didn't get a result, that's weird
   337  			downloadError = errors.New("failed to get outputs from one of the transfers")
   338  		}
   339  	}
   340  
   341  	return downloaded, downloadError
   342  
   343  }
   344  
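         // startDownloadWorker consumes file names from workChan, tries each of the given
         // TransferDetails in order until one succeeds, and sends a TransferResults for the file.
         // If every cache fails for a file, the error is reported and the worker exits.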
   345  func startDownloadWorker(source string, destination string, token string, transfers []TransferDetails, wg *sync.WaitGroup, workChan <-chan string, results chan<- TransferResults) {
   346  
   347  	defer wg.Done()
    348  	for file := range workChan {
    349  		var success bool
   350  		// Remove the source from the file path
   351  		newFile := strings.Replace(file, source, "", 1)
   352  		finalDest := path.Join(destination, newFile)
   353  		directory := path.Dir(finalDest)
   354  		var downloaded int64
   355  		err := os.MkdirAll(directory, 0700)
   356  		if err != nil {
    357  			results <- TransferResults{Error: errors.New("Failed to make directory: " + directory)}
   358  			continue
   359  		}
   360  		for _, transfer := range transfers {
   361  			transfer.Url.Path = file
   362  			log.Debugln("Constructed URL:", transfer.Url.String())
   363  			if downloaded, err = DownloadHTTP(transfer, finalDest, token); err != nil {
   364  				log.Debugln("Failed to download:", err)
   365  				var ope *net.OpError
   366  				var cse *ConnectionSetupError
   367  				errorString := "Failed to download from " + transfer.Url.Hostname() + ":" +
   368  					transfer.Url.Port() + " "
   369  				if errors.As(err, &ope) && ope.Op == "proxyconnect" {
   370  					log.Debugln(ope)
   371  					AddrString, _ := os.LookupEnv("http_proxy")
   372  					if ope.Addr != nil {
   373  						AddrString = " " + ope.Addr.String()
   374  					}
   375  					errorString += "due to proxy " + AddrString + " error: " + ope.Unwrap().Error()
   376  				} else if errors.As(err, &cse) {
   377  					errorString += "+ proxy=" + strconv.FormatBool(transfer.Proxy) + ": "
   378  					if sce, ok := cse.Unwrap().(grab.StatusCodeError); ok {
   379  						errorString += sce.Error()
   380  					} else {
   381  						errorString += err.Error()
   382  					}
   383  				} else {
   384  					errorString += "+ proxy=" + strconv.FormatBool(transfer.Proxy) +
   385  						": " + err.Error()
   386  				}
   387  				AddError(&FileDownloadError{errorString, err})
   388  				continue
   389  			} else {
   390  				log.Debugln("Downloaded bytes:", downloaded)
   391  				success = true
   392  				break
   393  			}
   394  
   395  		}
   396  		if !success {
   397  			log.Debugln("Failed to download with HTTP")
   398  			results <- TransferResults{Error: errors.New("failed to download with HTTP")}
   399  			return
   400  		} else {
   401  			results <- TransferResults{
   402  				Downloaded: downloaded,
   403  				Error:      nil,
   404  			}
   405  		}
   406  	}
   407  }
   408  
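         // parseTransferStatus parses a transfer status trailer of the form "<code>: <message>"
         // (for example, "500: Unable to read from upstream") into its integer status code and
         // message text.  It returns (0, "") if the string is malformed.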
   409  func parseTransferStatus(status string) (int, string) {
   410  	parts := strings.SplitN(status, ": ", 2)
   411  	if len(parts) != 2 {
   412  		return 0, ""
   413  	}
   414  
   415  	statusCode, err := strconv.Atoi(strings.TrimSpace(parts[0]))
   416  	if err != nil {
   417  		return 0, ""
   418  	}
   419  
   420  	return statusCode, strings.TrimSpace(parts[1])
   421  }
   422  
    423  // DownloadHTTP performs the actual download of the file and returns the number of bytes transferred
   424  func DownloadHTTP(transfer TransferDetails, dest string, token string) (int64, error) {
   425  
   426  	// Create the client, request, and context
   427  	client := grab.NewClient()
   428  	transport := http.Transport{
   429  		Proxy: http.ProxyFromEnvironment,
   430  		DialContext: (&net.Dialer{
   431  			Timeout:   10 * time.Second,
   432  			KeepAlive: 30 * time.Second,
   433  		}).DialContext,
   434  		MaxIdleConns:          30,
   435  		IdleConnTimeout:       90 * time.Second,
   436  		TLSHandshakeTimeout:   15 * time.Second,
   437  		ExpectContinueTimeout: 1 * time.Second,
   438  		ResponseHeaderTimeout: 10 * time.Second,
   439  	}
   440  	if !transfer.Proxy {
   441  		transport.Proxy = nil
   442  	}
   443  	client.HTTPClient.Transport = &transport
   444  
   445  	ctx, cancel := context.WithCancel(context.Background())
   446  	defer cancel()
   447  	log.Debugln("Transfer URL String:", transfer.Url.String())
   448  	req, _ := grab.NewRequest(dest, transfer.Url.String())
   449  	if token != "" {
   450  		req.HTTPRequest.Header.Set("Authorization", "Bearer "+token)
   451  	}
   452  	// Set the headers
   453  	req.HTTPRequest.Header.Set("X-Transfer-Status", "true")
   454  	req.HTTPRequest.Header.Set("TE", "trailers")
    455  	req = req.WithContext(ctx)
   456  
   457  	// Test the transfer speed every 5 seconds
   458  	t := time.NewTicker(5000 * time.Millisecond)
   459  	defer t.Stop()
   460  
   461  	// Progress ticker
   462  	progressTicker := time.NewTicker(500 * time.Millisecond)
   463  	defer progressTicker.Stop()
   464  
    465  	// Determine the minimum acceptable download speed
   466  	// Check the environment variable STASHCP_MINIMUM_DOWNLOAD_SPEED
   467  	downloadLimitStr := os.Getenv("STASHCP_MINIMUM_DOWNLOAD_SPEED")
   468  	var downloadLimit int64 = 1024 * 100
   469  	if downloadLimitStr != "" {
   470  		var err error
   471  		downloadLimit, err = strconv.ParseInt(downloadLimitStr, 10, 64)
   472  		if err != nil {
   473  			log.Errorln("Environment variable STASHCP_MINIMUM_DOWNLOAD_SPEED=", downloadLimitStr, " is not parsable as integer:", err, "defaulting to 1MB/s")
   474  		}
   475  	}
    476  	// If we are doing a recursive download, divide the limit by the likely number of concurrent workers (~5)
   477  	if Options.Recursive {
   478  		downloadLimit /= 5
   479  	}
   480  
   481  	// Start the transfer
   482  	log.Debugln("Starting the HTTP transfer...")
   483  	filename := path.Base(dest)
   484  	resp := client.Do(req)
   485  	// Check the error real quick
   486  	if resp.IsComplete() {
   487  		if err := resp.Err(); err != nil {
   488  			log.Errorln("Failed to download:", err)
   489  			return 0, &ConnectionSetupError{Err: err}
   490  		}
   491  	}
   492  
   493  	var progressBar *mpb.Bar
   494  	if Options.ProgressBars {
   495  		progressBar = p.AddBar(0,
   496  			mpb.PrependDecorators(
   497  				decor.Name(filename, decor.WCSyncSpaceR),
   498  				decor.CountersKibiByte("% .2f / % .2f"),
   499  			),
   500  			mpb.AppendDecorators(
   501  				decor.EwmaETA(decor.ET_STYLE_GO, 90),
   502  				decor.Name(" ] "),
   503  				decor.EwmaSpeed(decor.UnitKiB, "% .2f", 20),
   504  			),
   505  		)
   506  	}
   507  
   508  	var previousCompletedBytes int64 = 0
   509  	var previousCompletedTime = time.Now()
   510  	var startBelowLimit int64 = 0
   511  
   512  	var noProgressStartTime time.Time
   513  	var lastBytesComplete int64
   514  	// Loop of the download
   515  Loop:
   516  	for {
   517  		select {
   518  		case <-progressTicker.C:
   519  			if Options.ProgressBars {
   520  				progressBar.SetTotal(resp.Size, false)
   521  				currentCompletedBytes := resp.BytesComplete()
   522  				progressBar.IncrInt64(currentCompletedBytes - previousCompletedBytes)
   523  				previousCompletedBytes = currentCompletedBytes
   524  				currentCompletedTime := time.Now()
   525  				progressBar.DecoratorEwmaUpdate(currentCompletedTime.Sub(previousCompletedTime))
   526  				previousCompletedTime = currentCompletedTime
   527  			}
   528  
   529  		case <-t.C:
   530  			
   531  			if resp.BytesComplete() == lastBytesComplete {
   532  				if noProgressStartTime.IsZero() {
   533  					noProgressStartTime = time.Now()
    534  				} else if time.Since(noProgressStartTime) > 100 * time.Second {
    535  					errMsg := fmt.Sprintf("No progress for more than %d seconds.", int(time.Since(noProgressStartTime).Seconds()))
    536  					log.Errorln(errMsg)
    537  					return 0, &StoppedTransferError{
    538  						Err: errMsg,
    539  					}
   540  				}
   541  			} else {
   542  				noProgressStartTime = time.Time{}
   543  			}
   544  			lastBytesComplete = resp.BytesComplete()
   545  
   546  
   547  			// Check if we are downloading fast enough
   548  			if resp.BytesPerSecond() < float64(downloadLimit) {
   549  				// Give the download 120 seconds to start
   550  				if resp.Duration() < time.Second*120 {
   551  					continue
   552  				} else if startBelowLimit == 0 {
   553  					log.Warnln("Download speed of ", resp.BytesPerSecond(), "bytes/s", " is below the limit of", downloadLimit, "bytes/s")
   554  					startBelowLimit = time.Now().Unix()
   555  					continue
   556  				} else if (time.Now().Unix() - startBelowLimit) < 30 {
   557  					// If the download is below the threshold for less than 30 seconds, continue
   558  					continue
   559  				}
   560  				// The download is below the threshold for more than 30 seconds, cancel the download
   561  				cancel()
   562  				if Options.ProgressBars {
   563  					var cancelledProgressBar = p.AddBar(0,
   564  						mpb.BarQueueAfter(progressBar),
   565  						mpb.BarFillerClearOnComplete(),
   566  						mpb.PrependDecorators(
   567  							decor.Name(filename, decor.WC{W: len(filename) + 1, C: decor.DidentRight}),
   568  							decor.OnComplete(decor.Name(filename, decor.WCSyncSpaceR), "cancelled, too slow!"),
   569  							decor.OnComplete(decor.EwmaETA(decor.ET_STYLE_MMSS, 0, decor.WCSyncWidth), ""),
   570  						),
   571  						mpb.AppendDecorators(
   572  							decor.OnComplete(decor.Percentage(decor.WC{W: 5}), ""),
   573  						),
   574  					)
   575  					progressBar.SetTotal(resp.Size, true)
   576  					cancelledProgressBar.SetTotal(resp.Size, true)
   577  				}
   578  
   579  				log.Errorln("Download speed of ", resp.BytesPerSecond(), "bytes/s", " is below the limit of", downloadLimit, "bytes/s")
   580  				
   581  				return 0, &SlowTransferError{
   582  					BytesTransferred: resp.BytesComplete(),
   583  					BytesPerSecond:   int64(resp.BytesPerSecond()),
   584  					Duration:         resp.Duration(),
   585  					BytesTotal:       resp.Size,
   586  				}
   587  
   588  			} else {
   589  				// The download is fast enough, reset the startBelowLimit
   590  				startBelowLimit = 0
   591  			}
   592  
   593  		case <-resp.Done:
   594  			// download is complete
   595  			if Options.ProgressBars {
   596  				downloadError := resp.Err()
   597  				completeMsg := "done!"
   598  				if downloadError != nil {
   599  					completeMsg = downloadError.Error()
   600  				}
   601  				var doneProgressBar = p.AddBar(resp.Size,
   602  					mpb.BarQueueAfter(progressBar),
   603  					mpb.BarFillerClearOnComplete(),
   604  					mpb.PrependDecorators(
   605  						decor.Name(filename, decor.WC{W: len(filename) + 1, C: decor.DidentRight}),
   606  						decor.OnComplete(decor.Name(filename, decor.WCSyncSpaceR), completeMsg),
   607  						decor.OnComplete(decor.EwmaETA(decor.ET_STYLE_MMSS, 0, decor.WCSyncWidth), ""),
   608  					),
   609  					mpb.AppendDecorators(
   610  						decor.OnComplete(decor.Percentage(decor.WC{W: 5}), ""),
   611  					),
   612  				)
   613  
   614  				progressBar.SetTotal(resp.Size, true)
   615  				doneProgressBar.SetTotal(resp.Size, true)
   616  			}
   617  			break Loop
   618  		}
   619  	}
   620  	//fmt.Printf("\nDownload saved to", resp.Filename)
   621  	err := resp.Err()
   622  	if err != nil {
   623  		// Connection errors
   624  		if errors.Is(err, syscall.ECONNREFUSED) ||
   625  			errors.Is(err, syscall.ECONNRESET) ||
   626  			errors.Is(err, syscall.ECONNABORTED) {
   627  			return 0, &ConnectionSetupError{URL: resp.Request.URL().String()}
   628  		}
   629  		log.Debugln("Got error from HTTP download", err)
   630  		return 0, err
   631  	} else {
   632  		// Check the trailers for any error information
   633  		trailer := resp.HTTPResponse.Trailer
   634  		if errorStatus := trailer.Get("X-Transfer-Status"); errorStatus != "" {
   635  			statusCode, statusText := parseTransferStatus(errorStatus)
   636  			if statusCode != 200 {
   637  				log.Debugln("Got error from file transfer")
   638  				return 0, errors.New("transfer error: " + statusText)
   639  			}
   640  		}
   641  	}
   642  	// Valid responses include 200 and 206.  The latter occurs if the download was resumed after a
   643  	// prior attempt.
   644  	if resp.HTTPResponse.StatusCode != 200 && resp.HTTPResponse.StatusCode != 206 {
   645  		log.Debugln("Got failure status code:", resp.HTTPResponse.StatusCode)
   646  		return 0, errors.New("failure status code")
   647  	}
   648  	log.Debugln("HTTP Transfer was successful")
   649  	return resp.BytesComplete(), nil
   650  }
   651  
   652  // ProgressReader wraps the io.Reader to get progress
   653  // Adapted from https://stackoverflow.com/questions/26050380/go-tracking-post-request-progress
   654  type ProgressReader struct {
   655  	file   *os.File
   656  	read   int64
   657  	size   int64
   658  	closed chan bool
   659  }
   660  
   661  // Read implements the common read function for io.Reader
   662  func (pr *ProgressReader) Read(p []byte) (n int, err error) {
   663  	n, err = pr.file.Read(p)
   664  	atomic.AddInt64(&pr.read, int64(n))
   665  	return n, err
   666  }
   667  
    668  // Close implements the close function of io.Closer
   669  func (pr *ProgressReader) Close() error {
   670  	err := pr.file.Close()
   671  	// Also, send the closed channel a message
   672  	pr.closed <- true
   673  	return err
   674  }
   675  
    676  // UploadFile uploads a file using HTTP
   677  func UploadFile(src string, dest *url.URL, token string, namespace namespaces.Namespace) (int64, error) {
   678  
   679  	log.Debugln("In UploadFile")
   680  	log.Debugln("Dest", dest.String())
   681  	// Try opening the file to send
   682  	file, err := os.Open(src)
   683  	if err != nil {
   684  		log.Errorln("Error opening local file:", err)
   685  		return 0, err
   686  	}
   687  	// Stat the file to get the size (for progress bar)
   688  	fileInfo, err := file.Stat()
   689  	if err != nil {
   690  		log.Errorln("Error stating local file ", src, ":", err)
   691  		return 0, err
   692  	}
   693  	// Parse the writeback host as a URL
   694  	writebackhostUrl, err := url.Parse(namespace.WriteBackHost)
   695  	if err != nil {
   696  		return 0, err
   697  	}
   698  	dest.Host = writebackhostUrl.Host
   699  	dest.Scheme = "https"
   700  
   701  	// Check if the destination is a directory
   702  	isDestDir, err := IsDir(dest, token, namespace)
   703  	if err != nil {
   704  		log.Warnln("Received an error from checking if dest was a directory.  Going to continue as if there was no error")
   705  	}
   706  	if isDestDir {
   707  		// Set the destination as the basename of the source
   708  		dest.Path = path.Join(dest.Path, path.Base(src))
   709  		log.Debugln("Destination", dest.Path, "is a directory")
   710  	}
   711  
   712  	// Create the wrapped reader and send it to the request
   713  	closed := make(chan bool, 1)
   714  	errorChan := make(chan error, 1)
   715  	responseChan := make(chan *http.Response)
   716  	reader := &ProgressReader{file, 0, fileInfo.Size(), closed}
   717  	putContext, cancel := context.WithCancel(context.Background())
   718  	defer cancel()
   719  	log.Debugln("Full destination URL:", dest.String())
   720  	var request *http.Request
    721  	// For files that are 0 length, we need to send a PUT request with no body (http.NoBody)
   722  	if fileInfo.Size() > 0 {
   723  		request, err = http.NewRequestWithContext(putContext, "PUT", dest.String(), reader)
   724  	} else {
   725  		request, err = http.NewRequestWithContext(putContext, "PUT", dest.String(), http.NoBody)
   726  	}
   727  	if err != nil {
   728  		log.Errorln("Error creating request:", err)
   729  		return 0, err
   730  	}
   731  	request.ContentLength = fileInfo.Size()
   732  	// Set the authorization header
   733  	request.Header.Set("Authorization", "Bearer "+token)
   734  	var lastKnownWritten int64
   735  	t := time.NewTicker(20 * time.Second)
   736  	defer t.Stop()
   737  	go doPut(request, responseChan, errorChan)
   738  	var lastError error = nil
   739  
   740  	// Do the select on a ticker, and the writeChan
   741  Loop:
   742  	for {
   743  		select {
   744  		case <-t.C:
    745  			// If nothing new has been written since the last tick, give up on the upload
   746  			currentRead := atomic.LoadInt64(&reader.read)
   747  			log.Debugln("Current read:", currentRead)
   748  			log.Debugln("Last known written:", lastKnownWritten)
   749  			if lastKnownWritten < currentRead {
   750  				// We have made progress!
   751  				lastKnownWritten = currentRead
   752  			} else {
    753  				// No progress has been made since the last tick (20 seconds)
    754  				log.Errorln("No progress made in the last 20 seconds of upload")
    755  				lastError = errors.New("upload cancelled, no progress in 20 seconds")
   756  				break Loop
   757  			}
   758  
   759  		case <-closed:
   760  			// The file has been closed, we're done here
   761  			log.Debugln("File closed")
   762  		case response := <-responseChan:
   763  			if response.StatusCode != 200 {
   764  				log.Errorln("Got failure status code:", response.StatusCode)
   765  				lastError = errors.New("failure status code")
   766  				break Loop
   767  			}
   768  			break Loop
   769  
   770  		case err := <-errorChan:
   771  			log.Warningln("Unexpected error when performing upload:", err)
   772  			lastError = err
   773  			break Loop
   774  
   775  		}
   776  	}
   777  
   778  	if fileInfo.Size() == 0 {
   779  		return 0, lastError
   780  	} else {
   781  		return atomic.LoadInt64(&reader.read), lastError
   782  	}
   783  
   784  }
   785  
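         // UploadClient is the HTTP client used by doPut to perform uploads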
   786  var UploadClient = &http.Client{}
   787  
    788  // doPut performs the actual PUT request to the server and sends the response (or error) back on the provided channels
   789  func doPut(request *http.Request, responseChan chan<- *http.Response, errorChan chan<- error) {
   790  	client := UploadClient
   791  	dump, _ := httputil.DumpRequestOut(request, false)
   792  	log.Debugf("Dumping request: %s", dump)
   793  	response, err := client.Do(request)
   794  	if err != nil {
   795  		log.Errorln("Error with PUT:", err)
   796  		errorChan <- err
   797  		return
   798  	}
   799  	dump, _ = httputil.DumpResponse(response, true)
   800  	log.Debugf("Dumping response: %s", dump)
   801  	if response.StatusCode != 200 {
   802  		log.Errorln("Error status code:", response.Status)
   803  		log.Debugln("From the server:")
   804  		textResponse, err := io.ReadAll(response.Body)
   805  		if err != nil {
   806  			log.Errorln("Error reading response from server:", err)
   807  			responseChan <- response
   808  			return
   809  		}
   810  		log.Debugln(string(textResponse))
   811  	}
   812  	responseChan <- response
   813  
   814  }
   815  
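         // IsDir reports whether the given URL refers to a directory, determined by issuing a
         // WebDAV Stat against the namespace's dirlist host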
   816  func IsDir(dirUrl *url.URL, token string, namespace namespaces.Namespace) (bool, error) {
   817  	connectUrl := url.URL{}
   818  	if namespace.DirListHost != "" {
   819  		// Parse the dir list host
   820  		dirListURL, err := url.Parse(namespace.DirListHost)
   821  		if err != nil {
   822  			log.Errorln("Failed to parse dirlisthost from namespaces into URL:", err)
   823  			return false, err
   824  		}
   825  		connectUrl = *dirListURL
   826  
   827  	} else {
   828  		//rootUrl.Path = ""
   829  		connectUrl.Host = "stash.osgconnect.net:1094"
   830  		connectUrl.Scheme = "http"
   831  	}
   832  
   833  	c := gowebdav.NewClient(connectUrl.String(), "", "")
   834  	//c.SetHeader("Authorization", "Bearer "+token)
   835  
   836  	// The path can have special characters in it like '#' and '?', so we have to collect
   837  	// the path parts and join them together
   838  	finalPath := dirUrl.Path
   839  	if dirUrl.RawQuery != "" {
   840  		finalPath += "?" + dirUrl.RawQuery
   841  	}
   842  	if dirUrl.Fragment != "" {
   843  		finalPath += "#" + dirUrl.Fragment
   844  	}
   845  	log.Debugln("Final webdav checked path:", finalPath)
   846  	info, err := c.Stat(finalPath)
   847  	if err != nil {
   848  		log.Debugln("Failed to ReadDir:", err, "for URL:", dirUrl.String())
   849  		return false, err
   850  	}
   851  	log.Debugln("Got isDir response:", info.IsDir())
   852  	return info.IsDir(), nil
   853  
   854  }
   855  
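         // walkDavDir returns the list of files under the given directory URL by recursively
         // walking the namespace's WebDAV dirlist host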
   856  func walkDavDir(url *url.URL, token string, namespace namespaces.Namespace) ([]string, error) {
   857  
   858  	// First, check if the url is a directory
   859  	isDir, err := IsDir(url, token, namespace)
   860  	if err != nil {
   861  		log.Errorln("Failed to check if path", url.Path, " is directory:", err)
   862  		return nil, err
   863  	}
   864  	if !isDir {
   865  		log.Errorln("Path ", url.Path, " is not a directory.")
   866  		return nil, errors.New("path " + url.Path + " is not a directory")
   867  	}
   868  
   869  	// Create the client to walk the filesystem
   870  	rootUrl := *url
   871  	if namespace.DirListHost != "" {
   872  		// Parse the dir list host
   873  		dirListURL, err := url.Parse(namespace.DirListHost)
   874  		if err != nil {
   875  			log.Errorln("Failed to parse dirlisthost from namespaces into URL:", err)
   876  			return nil, err
   877  		}
   878  		rootUrl = *dirListURL
   879  
   880  	} else {
   881  		rootUrl.Path = ""
   882  		rootUrl.Host = "stash.osgconnect.net:1094"
   883  		rootUrl.Scheme = "http"
   884  	}
   885  	log.Debugln("Dir list host: ", rootUrl.String())
   886  	c := gowebdav.NewClient(rootUrl.String(), "", "")
   887  
   888  	// XRootD does not like keep alives and kills things, so turn them off.
   889  	transport := http.Transport{
   890  		Proxy: http.ProxyFromEnvironment,
   891  		DialContext: (&net.Dialer{
   892  			Timeout:   10 * time.Second,
   893  			KeepAlive: 30 * time.Second,
   894  		}).DialContext,
   895  		TLSHandshakeTimeout: 15 * time.Second,
   896  		DisableKeepAlives:   true,
   897  	}
   898  	c.SetTransport(&transport)
   899  
   900  	files, err := walkDir(url.Path, c)
   901  	log.Debugln("Found files:", files)
   902  	return files, err
   903  
   904  }
   905  
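         // walkDir recursively lists all regular files beneath path using the given WebDAV client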
   906  func walkDir(path string, client *gowebdav.Client) ([]string, error) {
   907  	var files []string
   908  	log.Debugln("Reading directory: ", path)
   909  	infos, err := client.ReadDir(path)
   910  	if err != nil {
   911  		return nil, err
   912  	}
   913  	for _, info := range infos {
   914  		newPath := path + "/" + info.Name()
   915  		if info.IsDir() {
   916  			returnedFiles, err := walkDir(newPath, client)
   917  			if err != nil {
   918  				return nil, err
   919  			}
   920  			files = append(files, returnedFiles...)
   921  		} else {
   922  			// It is a normal file
   923  			files = append(files, newPath)
   924  		}
   925  	}
   926  	return files, nil
   927  }