github.com/ahmet2mir/goreleaser@v0.180.3-0.20210927151101-8e5ee5a9b8c5/internal/pipe/blob/upload.go

package blob

import (
	"fmt"
	"io"
	"net/url"
	"os"
	"path"
	"strings"

	"github.com/apex/log"
	"github.com/goreleaser/goreleaser/internal/artifact"
	"github.com/goreleaser/goreleaser/internal/extrafiles"
	"github.com/goreleaser/goreleaser/internal/semerrgroup"
	"github.com/goreleaser/goreleaser/internal/tmpl"
	"github.com/goreleaser/goreleaser/pkg/config"
	"github.com/goreleaser/goreleaser/pkg/context"
	"gocloud.dev/blob"
	"gocloud.dev/secrets"

	// Import the blob packages we want to be able to open.
	_ "gocloud.dev/blob/azureblob"
	_ "gocloud.dev/blob/gcsblob"
	_ "gocloud.dev/blob/s3blob"

	// Import the secrets packages we want to be able to use.
	_ "gocloud.dev/secrets/awskms"
	_ "gocloud.dev/secrets/azurekeyvault"
	_ "gocloud.dev/secrets/gcpkms"
)

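// urlFor builds the gocloud.dev bucket URL for the configured provider,
// appending the S3-specific query parameters (endpoint, region, disableSSL)
// when the provider is s3.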
func urlFor(ctx *context.Context, conf config.Blob) (string, error) {
	bucket, err := tmpl.New(ctx).Apply(conf.Bucket)
	if err != nil {
		return "", err
	}

	bucketURL := fmt.Sprintf("%s://%s", conf.Provider, bucket)

	if conf.Provider != "s3" {
		return bucketURL, nil
	}

	query := url.Values{}
	if conf.Endpoint != "" {
		query.Add("endpoint", conf.Endpoint)
		query.Add("s3ForcePathStyle", "true")
	}
	if conf.Region != "" {
		query.Add("region", conf.Region)
	}
	if conf.DisableSSL {
		query.Add("disableSSL", "true")
	}

	if len(query) > 0 {
		bucketURL = bucketURL + "?" + query.Encode()
	}

	return bucketURL, nil
}

// doUpload takes the goreleaser context (which includes the artifacts) and
// uploads them to the destination bucket (eg: gs://gorelease-bucket) using
// the given uploader implementation.
func doUpload(ctx *context.Context, conf config.Blob) error {
	folder, err := tmpl.New(ctx).Apply(conf.Folder)
	if err != nil {
		return err
	}
	folder = strings.TrimPrefix(folder, "/")

	bucketURL, err := urlFor(ctx, conf)
	if err != nil {
		return err
	}

	filter := artifact.Or(
		artifact.ByType(artifact.UploadableArchive),
		artifact.ByType(artifact.UploadableBinary),
		artifact.ByType(artifact.UploadableSourceArchive),
		artifact.ByType(artifact.Checksum),
		artifact.ByType(artifact.Signature),
		artifact.ByType(artifact.LinuxPackage),
	)
	if len(conf.IDs) > 0 {
		filter = artifact.And(filter, artifact.ByIDs(conf.IDs...))
	}

	up := &productionUploader{}
	if err := up.Open(ctx, bucketURL); err != nil {
		return handleError(err, bucketURL)
	}
	defer up.Close()

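	// Upload every matching artifact concurrently, bounded by the configured parallelism.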
	g := semerrgroup.New(ctx.Parallelism)
	for _, artifact := range ctx.Artifacts.Filter(filter).List() {
		artifact := artifact
		g.Go(func() error {
			// TODO: replace this with ?prefix=folder on the bucket url
			dataFile := artifact.Path
			uploadFile := path.Join(folder, artifact.Name)

			return uploadData(ctx, conf, up, dataFile, uploadFile, bucketURL)
		})
	}

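	// Also upload any extra files configured alongside the release artifacts.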
	files, err := extrafiles.Find(conf.ExtraFiles)
	if err != nil {
		return err
	}
	for name, fullpath := range files {
		name := name
		fullpath := fullpath
		g.Go(func() error {
			uploadFile := path.Join(folder, name)

			return uploadData(ctx, conf, up, fullpath, uploadFile, bucketURL)
		})
	}

	return g.Wait()
}

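// uploadData reads (and optionally encrypts) the local file and uploads it to
// the bucket under the given name.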
func uploadData(ctx *context.Context, conf config.Blob, up uploader, dataFile, uploadFile, bucketURL string) error {
	data, err := getData(ctx, conf, dataFile)
	if err != nil {
		return err
	}

	err = up.Upload(ctx, uploadFile, data)
	if err != nil {
		return handleError(err, bucketURL)
	}
	return nil
}

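// handleError wraps the raw provider error with a friendlier message for the
// most common misconfigurations (missing bucket, bad credentials, and so on).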
func handleError(err error, url string) error {
	switch {
	case errorContains(err, "NoSuchBucket", "ContainerNotFound", "notFound"):
		return fmt.Errorf("provided bucket does not exist: %s: %w", url, err)
	case errorContains(err, "NoCredentialProviders"):
		return fmt.Errorf("check credentials and access to bucket: %s: %w", url, err)
	case errorContains(err, "InvalidAccessKeyId"):
		return fmt.Errorf("aws access key id you provided does not exist in our records: %w", err)
	case errorContains(err, "AuthenticationFailed"):
		return fmt.Errorf("azure storage key you provided is not valid: %w", err)
	case errorContains(err, "invalid_grant"):
		return fmt.Errorf("google app credentials you provided is not valid: %w", err)
	case errorContains(err, "no such host"):
		return fmt.Errorf("azure storage account you provided is not valid: %w", err)
	case errorContains(err, "ServiceCode=ResourceNotFound"):
		return fmt.Errorf("missing azure storage key for provided bucket %s: %w", url, err)
	default:
		return fmt.Errorf("failed to write to bucket: %w", err)
	}
}

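// getData reads the file at path and, when a KMS key is configured, encrypts
// its contents through the gocloud.dev secrets keeper before upload.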
func getData(ctx *context.Context, conf config.Blob, path string) ([]byte, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return data, fmt.Errorf("failed to open file %s: %w", path, err)
	}
	if conf.KMSKey == "" {
		return data, nil
	}
	keeper, err := secrets.OpenKeeper(ctx, conf.KMSKey)
	if err != nil {
		return data, fmt.Errorf("failed to open kms %s: %w", conf.KMSKey, err)
	}
	defer keeper.Close()
	data, err = keeper.Encrypt(ctx, data)
	if err != nil {
		return data, fmt.Errorf("failed to encrypt with kms: %w", err)
	}
	return data, nil
}

// uploader abstracts opening a bucket and uploading files to it.
type uploader interface {
	io.Closer
	Open(ctx *context.Context, url string) error
	Upload(ctx *context.Context, path string, data []byte) error
}

// productionUploader is the uploader implementation backed by a gocloud.dev blob.Bucket.
type productionUploader struct {
	bucket *blob.Bucket
}

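// Close closes the underlying bucket, if one was opened.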
func (u *productionUploader) Close() error {
	if u.bucket == nil {
		return nil
	}
	return u.bucket.Close()
}

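// Open opens the bucket identified by the given gocloud.dev bucket URL.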
func (u *productionUploader) Open(ctx *context.Context, bucket string) error {
	log.WithFields(log.Fields{
		"bucket": bucket,
	}).Debug("uploading")

	conn, err := blob.OpenBucket(ctx, bucket)
	if err != nil {
		return err
	}
	u.bucket = conn
	return nil
}

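// Upload writes data to the given path in the bucket, setting a
// Content-Disposition header so downloads keep the original file name.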
func (u *productionUploader) Upload(ctx *context.Context, filepath string, data []byte) (err error) {
	log.WithField("path", filepath).Info("uploading")

	opts := &blob.WriterOptions{
		ContentDisposition: "attachment; filename=" + path.Base(filepath),
	}
	w, err := u.bucket.NewWriter(ctx, filepath, opts)
	if err != nil {
		return err
	}
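	// Propagate the writer's Close error through the named return value when
	// the write itself succeeded.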
	defer func() {
		if cerr := w.Close(); err == nil {
			err = cerr
		}
	}()
	_, err = w.Write(data)
	return
}