github.com/pquerna/agent@v2.1.8+incompatible/agent/artifact_uploader.go (about)

     1  package agent
     2  
     3  import (
     4  	"crypto/sha1"
     5  	"errors"
     6  	"fmt"
     7  	"io"
     8  	"os"
     9  	"path/filepath"
    10  	"strings"
    11  	"sync"
    12  	"time"
    13  
    14  	"github.com/buildkite/agent/api"
    15  	"github.com/buildkite/agent/glob"
    16  	"github.com/buildkite/agent/logger"
    17  	"github.com/buildkite/agent/pool"
    18  	"github.com/buildkite/agent/retry"
    19  )
    20  
// ArtifactUploader collects the files matching a set of glob paths and
// uploads them as artifacts of a single job, either to an explicit
// destination (e.g. S3) or via the default form uploader.
type ArtifactUploader struct {
	// The APIClient that will be used when uploading jobs
	APIClient *api.Client

	// The ID of the Job the artifacts are created against
	JobID string

	// The path of the uploads: a semicolon-separated list of glob
	// patterns (see Collect)
	Paths string

	// Where we'll be uploading artifacts, e.g. an "s3://" URL.
	// Empty selects the FormUploader (see upload).
	Destination string
}
    34  
    35  func (a *ArtifactUploader) Upload() error {
    36  	// Create artifact structs for all the files we need to upload
    37  	artifacts, err := a.Collect()
    38  	if err != nil {
    39  		return err
    40  	}
    41  
    42  	if len(artifacts) == 0 {
    43  		logger.Info("No files matched paths: %s", a.Paths)
    44  	} else {
    45  		logger.Info("Found %d files that match \"%s\"", len(artifacts), a.Paths)
    46  
    47  		err := a.upload(artifacts)
    48  		if err != nil {
    49  			return err
    50  		}
    51  	}
    52  
    53  	return nil
    54  }
    55  
    56  func (a *ArtifactUploader) Collect() (artifacts []*api.Artifact, err error) {
    57  	globPaths := strings.Split(a.Paths, ";")
    58  
    59  	for _, globPath := range globPaths {
    60  		workingDirectory := glob.Root(globPath)
    61  		globPath = strings.TrimSpace(globPath)
    62  
    63  		if globPath != "" {
    64  			logger.Debug("Searching for %s", filepath.Join(workingDirectory, globPath))
    65  
    66  			files, err := glob.Glob(workingDirectory, globPath)
    67  			if err != nil {
    68  				return nil, err
    69  			}
    70  
    71  			for _, file := range files {
    72  				// Generate an absolute path for the artifact
    73  				absolutePath, err := filepath.Abs(file)
    74  				if err != nil {
    75  					return nil, err
    76  				}
    77  
    78  				if isDir, _ := glob.IsDir(absolutePath); isDir {
    79  					logger.Debug("Skipping directory %s", file)
    80  					continue
    81  				}
    82  
    83  				// Create a relative path (from the workingDirectory) to the artifact, by removing the
    84  				// first part of the absolutePath that is the workingDirectory.
    85  				relativePath := strings.Replace(absolutePath, workingDirectory, "", 1)
    86  
    87  				// Ensure the relativePath doesn't have a file seperator "/" as the first character
    88  				relativePath = strings.TrimPrefix(relativePath, string(os.PathSeparator))
    89  
    90  				// Build an artifact object using the paths we have.
    91  				artifact, err := a.build(relativePath, absolutePath, globPath)
    92  				if err != nil {
    93  					return nil, err
    94  				}
    95  
    96  				artifacts = append(artifacts, artifact)
    97  			}
    98  		}
    99  	}
   100  
   101  	return artifacts, nil
   102  }
   103  
   104  func (a *ArtifactUploader) build(relativePath string, absolutePath string, globPath string) (*api.Artifact, error) {
   105  	// Temporarily open the file to get it's size
   106  	file, err := os.Open(absolutePath)
   107  	if err != nil {
   108  		return nil, err
   109  	}
   110  	defer file.Close()
   111  
   112  	// Grab it's file info (which includes it's file size)
   113  	fileInfo, err := file.Stat()
   114  	if err != nil {
   115  		return nil, err
   116  	}
   117  
   118  	// Generate a sha1 checksum for the file
   119  	hash := sha1.New()
   120  	io.Copy(hash, file)
   121  	checksum := fmt.Sprintf("%x", hash.Sum(nil))
   122  
   123  	// Create our new artifact data structure
   124  	artifact := &api.Artifact{
   125  		Path:         relativePath,
   126  		AbsolutePath: absolutePath,
   127  		GlobPath:     globPath,
   128  		FileSize:     fileInfo.Size(),
   129  		Sha1Sum:      checksum,
   130  	}
   131  
   132  	return artifact, nil
   133  }
   134  
   135  func (a *ArtifactUploader) upload(artifacts []*api.Artifact) error {
   136  	var uploader Uploader
   137  
   138  	// Determine what uploader to use
   139  	if a.Destination != "" {
   140  		if strings.HasPrefix(a.Destination, "s3://") {
   141  			uploader = new(S3Uploader)
   142  		} else {
   143  			return errors.New("Unknown upload destination: " + a.Destination)
   144  		}
   145  	} else {
   146  		uploader = new(FormUploader)
   147  	}
   148  
   149  	// Setup the uploader
   150  	err := uploader.Setup(a.Destination, a.APIClient.DebugHTTP)
   151  	if err != nil {
   152  		return err
   153  	}
   154  
   155  	// Set the URL's of the artifacts based on the uploader
   156  	for _, artifact := range artifacts {
   157  		artifact.URL = uploader.URL(artifact)
   158  	}
   159  
   160  	// Create the artifacts on Buildkite
   161  	batchCreator := ArtifactBatchCreator{
   162  		APIClient:         a.APIClient,
   163  		JobID:             a.JobID,
   164  		Artifacts:         artifacts,
   165  		UploadDestination: a.Destination,
   166  	}
   167  	artifacts, err = batchCreator.Create()
   168  	if err != nil {
   169  		return err
   170  	}
   171  
   172  	// Prepare a concurrency pool to upload the artifacts
   173  	p := pool.New(pool.MaxConcurrencyLimit)
   174  	errors := []error{}
   175  	var errorsMutex sync.Mutex
   176  
   177  	// Create a wait group so we can make sure the uploader waits for all
   178  	// the artifact states to upload before finishing
   179  	var stateUploaderWaitGroup sync.WaitGroup
   180  	stateUploaderWaitGroup.Add(1)
   181  
   182  	// A map to keep track of artifact states and how many we've uploaded
   183  	artifactStates := make(map[string]string)
   184  	artifactStatesUploaded := 0
   185  	var artifactStatesMutex sync.Mutex
   186  
   187  	// Spin up a gourtine that'll uploading artifact statuses every few
   188  	// seconds in batches
   189  	go func() {
   190  		for artifactStatesUploaded < len(artifacts) {
   191  			statesToUpload := make(map[string]string)
   192  
   193  			// Grab all the states we need to upload, and remove
   194  			// them from the tracking map
   195  			//
   196  			// Since we mutate the artifactStates variable in
   197  			// multiple routines, we need to lock it to make sure
   198  			// nothing else is changing it at the same time.
   199  			artifactStatesMutex.Lock()
   200  			for id, state := range artifactStates {
   201  				statesToUpload[id] = state
   202  				delete(artifactStates, id)
   203  			}
   204  			artifactStatesMutex.Unlock()
   205  
   206  			if len(statesToUpload) > 0 {
   207  				artifactStatesUploaded += len(statesToUpload)
   208  				for id, state := range statesToUpload {
   209  					logger.Debug("Artifact `%s` has state `%s`", id, state)
   210  				}
   211  
   212  				// Update the states of the artifacts in bulk.
   213  				err = retry.Do(func(s *retry.Stats) error {
   214  					_, err = a.APIClient.Artifacts.Update(a.JobID, statesToUpload)
   215  					if err != nil {
   216  						logger.Warn("%s (%s)", err, s)
   217  					}
   218  
   219  					return err
   220  				}, &retry.Config{Maximum: 10, Interval: 5 * time.Second})
   221  
   222  				if err != nil {
   223  					logger.Error("Error uploading artifact states: %s", err)
   224  
   225  					// Track the error that was raised. We need to
   226  					// aquire a lock since we mutate the errors
   227  					// slice in mutliple routines.
   228  					errorsMutex.Lock()
   229  					errors = append(errors, err)
   230  					errorsMutex.Unlock()
   231  				}
   232  
   233  				logger.Debug("Uploaded %d artfact states (%d/%d)", len(statesToUpload), artifactStatesUploaded, len(artifacts))
   234  			}
   235  
   236  			// Check again for states to upload in a few seconds
   237  			time.Sleep(1 * time.Second)
   238  		}
   239  
   240  		stateUploaderWaitGroup.Done()
   241  	}()
   242  
   243  	for _, artifact := range artifacts {
   244  		// Create new instance of the artifact for the goroutine
   245  		// See: http://golang.org/doc/effective_go.html#channels
   246  		artifact := artifact
   247  
   248  		p.Spawn(func() {
   249  			// Show a nice message that we're starting to upload the file
   250  			logger.Info("Uploading artifact %s %s (%d bytes)", artifact.ID, artifact.Path, artifact.FileSize)
   251  
   252  			// Upload the artifact and then set the state depending
   253  			// on whether or not it passed. We'll retry the upload
   254  			// a couple of times before giving up.
   255  			err = retry.Do(func(s *retry.Stats) error {
   256  				err := uploader.Upload(artifact)
   257  				if err != nil {
   258  					logger.Warn("%s (%s)", err, s)
   259  				}
   260  
   261  				return err
   262  			}, &retry.Config{Maximum: 10, Interval: 5 * time.Second})
   263  
   264  			var state string
   265  
   266  			// Did the upload eventually fail?
   267  			if err != nil {
   268  				logger.Error("Error uploading artifact \"%s\": %s", artifact.Path, err)
   269  
   270  				// Track the error that was raised. We need to
   271  				// aquire a lock since we mutate the errors
   272  				// slice in mutliple routines.
   273  				errorsMutex.Lock()
   274  				errors = append(errors, err)
   275  				errorsMutex.Unlock()
   276  
   277  				state = "error"
   278  			} else {
   279  				state = "finished"
   280  			}
   281  
   282  			// Since we mutate the artifactStates variable in
   283  			// multiple routines, we need to lock it to make sure
   284  			// nothing else is changing it at the same time.
   285  			artifactStatesMutex.Lock()
   286  			artifactStates[artifact.ID] = state
   287  			artifactStatesMutex.Unlock()
   288  		})
   289  	}
   290  
   291  	// Wait for the pool to finish
   292  	p.Wait()
   293  
   294  	// Wait for the statuses to finish uploading
   295  	stateUploaderWaitGroup.Wait()
   296  
   297  	if len(errors) > 0 {
   298  		logger.Fatal("There were errors with uploading some of the artifacts")
   299  	}
   300  
   301  	return nil
   302  }