github.com/goreleaser/goreleaser@v1.25.1/internal/pipe/blob/upload.go (about)

     1  package blob
     2  
     3  import (
     4  	"fmt"
     5  	"io"
     6  	"net/url"
     7  	"os"
     8  	"path"
     9  	"strings"
    10  
    11  	"github.com/aws/aws-sdk-go/aws"
    12  	"github.com/aws/aws-sdk-go/service/s3/s3manager"
    13  	"github.com/caarlos0/log"
    14  	"github.com/goreleaser/goreleaser/internal/artifact"
    15  	"github.com/goreleaser/goreleaser/internal/extrafiles"
    16  	"github.com/goreleaser/goreleaser/internal/semerrgroup"
    17  	"github.com/goreleaser/goreleaser/internal/tmpl"
    18  	"github.com/goreleaser/goreleaser/pkg/config"
    19  	"github.com/goreleaser/goreleaser/pkg/context"
    20  	"gocloud.dev/blob"
    21  	"gocloud.dev/secrets"
    22  
    23  	// Import the blob packages we want to be able to open.
    24  	_ "gocloud.dev/blob/azureblob"
    25  	_ "gocloud.dev/blob/gcsblob"
    26  	_ "gocloud.dev/blob/s3blob"
    27  
    28  	// import the secrets packages we want to be able to be used.
    29  	_ "gocloud.dev/secrets/awskms"
    30  	_ "gocloud.dev/secrets/azurekeyvault"
    31  	_ "gocloud.dev/secrets/gcpkms"
    32  )
    33  
    34  func urlFor(ctx *context.Context, conf config.Blob) (string, error) {
    35  	bucket, err := tmpl.New(ctx).Apply(conf.Bucket)
    36  	if err != nil {
    37  		return "", err
    38  	}
    39  
    40  	provider, err := tmpl.New(ctx).Apply(conf.Provider)
    41  	if err != nil {
    42  		return "", err
    43  	}
    44  
    45  	bucketURL := fmt.Sprintf("%s://%s", provider, bucket)
    46  	if provider != "s3" {
    47  		return bucketURL, nil
    48  	}
    49  
    50  	query := url.Values{}
    51  
    52  	endpoint, err := tmpl.New(ctx).Apply(conf.Endpoint)
    53  	if err != nil {
    54  		return "", err
    55  	}
    56  	if endpoint != "" {
    57  		query.Add("endpoint", endpoint)
    58  		if conf.S3ForcePathStyle == nil {
    59  			query.Add("s3ForcePathStyle", "true")
    60  		} else {
    61  			query.Add("s3ForcePathStyle", fmt.Sprintf("%t", *conf.S3ForcePathStyle))
    62  		}
    63  	}
    64  
    65  	region, err := tmpl.New(ctx).Apply(conf.Region)
    66  	if err != nil {
    67  		return "", err
    68  	}
    69  	if region != "" {
    70  		query.Add("region", region)
    71  	}
    72  
    73  	if conf.DisableSSL {
    74  		query.Add("disableSSL", "true")
    75  	}
    76  
    77  	if len(query) > 0 {
    78  		bucketURL = bucketURL + "?" + query.Encode()
    79  	}
    80  
    81  	return bucketURL, nil
    82  }
    83  
    84  // Takes goreleaser context(which includes artificats) and bucketURL for
    85  // upload to destination (eg: gs://gorelease-bucket) using the given uploader
    86  // implementation.
    87  func doUpload(ctx *context.Context, conf config.Blob) error {
    88  	dir, err := tmpl.New(ctx).Apply(conf.Directory)
    89  	if err != nil {
    90  		return err
    91  	}
    92  	dir = strings.TrimPrefix(dir, "/")
    93  
    94  	bucketURL, err := urlFor(ctx, conf)
    95  	if err != nil {
    96  		return err
    97  	}
    98  
    99  	byTypes := []artifact.Filter{
   100  		artifact.ByType(artifact.UploadableArchive),
   101  		artifact.ByType(artifact.UploadableBinary),
   102  		artifact.ByType(artifact.UploadableSourceArchive),
   103  		artifact.ByType(artifact.Checksum),
   104  		artifact.ByType(artifact.Signature),
   105  		artifact.ByType(artifact.Certificate),
   106  		artifact.ByType(artifact.LinuxPackage),
   107  		artifact.ByType(artifact.SBOM),
   108  	}
   109  	if conf.IncludeMeta {
   110  		byTypes = append(byTypes, artifact.ByType(artifact.Metadata))
   111  	}
   112  
   113  	filter := artifact.Or(byTypes...)
   114  	if len(conf.IDs) > 0 {
   115  		filter = artifact.And(filter, artifact.ByIDs(conf.IDs...))
   116  	}
   117  
   118  	up := &productionUploader{
   119  		cacheControl:       conf.CacheControl,
   120  		contentDisposition: conf.ContentDisposition,
   121  	}
   122  	if conf.Provider == "s3" && conf.ACL != "" {
   123  		up.beforeWrite = func(asFunc func(interface{}) bool) error {
   124  			req := &s3manager.UploadInput{}
   125  			if !asFunc(&req) {
   126  				return fmt.Errorf("could not apply before write")
   127  			}
   128  			req.ACL = aws.String(conf.ACL)
   129  			return nil
   130  		}
   131  	}
   132  
   133  	if err := up.Open(ctx, bucketURL); err != nil {
   134  		return handleError(err, bucketURL)
   135  	}
   136  	defer up.Close()
   137  
   138  	g := semerrgroup.New(ctx.Parallelism)
   139  	for _, artifact := range ctx.Artifacts.Filter(filter).List() {
   140  		artifact := artifact
   141  		g.Go(func() error {
   142  			// TODO: replace this with ?prefix=folder on the bucket url
   143  			dataFile := artifact.Path
   144  			uploadFile := path.Join(dir, artifact.Name)
   145  
   146  			return uploadData(ctx, conf, up, dataFile, uploadFile, bucketURL)
   147  		})
   148  	}
   149  
   150  	files, err := extrafiles.Find(ctx, conf.ExtraFiles)
   151  	if err != nil {
   152  		return err
   153  	}
   154  	for name, fullpath := range files {
   155  		name := name
   156  		fullpath := fullpath
   157  		g.Go(func() error {
   158  			uploadFile := path.Join(dir, name)
   159  			return uploadData(ctx, conf, up, fullpath, uploadFile, bucketURL)
   160  		})
   161  	}
   162  
   163  	return g.Wait()
   164  }
   165  
   166  func uploadData(ctx *context.Context, conf config.Blob, up uploader, dataFile, uploadFile, bucketURL string) error {
   167  	data, err := getData(ctx, conf, dataFile)
   168  	if err != nil {
   169  		return err
   170  	}
   171  
   172  	if err := up.Upload(ctx, uploadFile, data); err != nil {
   173  		return handleError(err, bucketURL)
   174  	}
   175  	return nil
   176  }
   177  
   178  // errorContains check if error contains specific string.
   179  func errorContains(err error, subs ...string) bool {
   180  	for _, sub := range subs {
   181  		if strings.Contains(err.Error(), sub) {
   182  			return true
   183  		}
   184  	}
   185  	return false
   186  }
   187  
   188  func handleError(err error, url string) error {
   189  	switch {
   190  	case errorContains(err, "NoSuchBucket", "ContainerNotFound", "notFound"):
   191  		return fmt.Errorf("provided bucket does not exist: %s: %w", url, err)
   192  	case errorContains(err, "NoCredentialProviders"):
   193  		return fmt.Errorf("check credentials and access to bucket: %s: %w", url, err)
   194  	case errorContains(err, "InvalidAccessKeyId"):
   195  		return fmt.Errorf("aws access key id you provided does not exist in our records: %w", err)
   196  	case errorContains(err, "AuthenticationFailed"):
   197  		return fmt.Errorf("azure storage key you provided is not valid: %w", err)
   198  	case errorContains(err, "invalid_grant"):
   199  		return fmt.Errorf("google app credentials you provided is not valid: %w", err)
   200  	case errorContains(err, "no such host"):
   201  		return fmt.Errorf("azure storage account you provided is not valid: %w", err)
   202  	case errorContains(err, "ServiceCode=ResourceNotFound"):
   203  		return fmt.Errorf("missing azure storage key for provided bucket %s: %w", url, err)
   204  	default:
   205  		return fmt.Errorf("failed to write to bucket: %w", err)
   206  	}
   207  }
   208  
   209  func getData(ctx *context.Context, conf config.Blob, path string) ([]byte, error) {
   210  	data, err := os.ReadFile(path)
   211  	if err != nil {
   212  		return data, fmt.Errorf("failed to open file %s: %w", path, err)
   213  	}
   214  	if conf.KMSKey == "" {
   215  		return data, nil
   216  	}
   217  	keeper, err := secrets.OpenKeeper(ctx, conf.KMSKey)
   218  	if err != nil {
   219  		return data, fmt.Errorf("failed to open kms %s: %w", conf.KMSKey, err)
   220  	}
   221  	defer keeper.Close()
   222  	data, err = keeper.Encrypt(ctx, data)
   223  	if err != nil {
   224  		return data, fmt.Errorf("failed to encrypt with kms: %w", err)
   225  	}
   226  	return data, err
   227  }
   228  
// uploader is the minimal interface doUpload needs from a blob backend:
// open a bucket by URL, upload byte payloads to named paths, and close the
// connection when done.
type uploader interface {
	io.Closer
	Open(ctx *context.Context, url string) error
	Upload(ctx *context.Context, path string, data []byte) error
}
   235  
// productionUploader implements uploader on top of a gocloud.dev blob.Bucket.
type productionUploader struct {
	// bucket is set by Open and nil until then; Close tolerates nil.
	bucket *blob.Bucket
	// beforeWrite is an optional hook passed to the blob writer options;
	// doUpload uses it to set the S3 ACL on the underlying request.
	beforeWrite func(asFunc func(interface{}) bool) error
	// cacheControl entries are joined with ", " into the Cache-Control value.
	cacheControl []string
	// contentDisposition is a template; Upload exposes {{ .Filename }} to it.
	contentDisposition string
}
   243  
   244  func (u *productionUploader) Close() error {
   245  	if u.bucket == nil {
   246  		return nil
   247  	}
   248  	return u.bucket.Close()
   249  }
   250  
   251  func (u *productionUploader) Open(ctx *context.Context, bucket string) error {
   252  	log.WithField("bucket", bucket).Debug("uploading")
   253  
   254  	conn, err := blob.OpenBucket(ctx, bucket)
   255  	if err != nil {
   256  		return err
   257  	}
   258  	u.bucket = conn
   259  	return nil
   260  }
   261  
   262  func (u *productionUploader) Upload(ctx *context.Context, filepath string, data []byte) error {
   263  	log.WithField("path", filepath).Info("uploading")
   264  
   265  	disp, err := tmpl.New(ctx).WithExtraFields(tmpl.Fields{
   266  		"Filename": path.Base(filepath),
   267  	}).Apply(u.contentDisposition)
   268  	if err != nil {
   269  		return err
   270  	}
   271  
   272  	opts := &blob.WriterOptions{
   273  		ContentDisposition: disp,
   274  		BeforeWrite:        u.beforeWrite,
   275  		CacheControl:       strings.Join(u.cacheControl, ", "),
   276  	}
   277  	w, err := u.bucket.NewWriter(ctx, filepath, opts)
   278  	if err != nil {
   279  		return err
   280  	}
   281  	defer func() { _ = w.Close() }()
   282  	if _, err = w.Write(data); err != nil {
   283  		return err
   284  	}
   285  	return w.Close()
   286  }