github.com/mysteriumnetwork/node@v0.0.0-20240516044423-365054f76801/ci/storage/s3.go

/*
 * Copyright (C) 2019 The "MysteriumNetwork/node" Authors.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

package storage

import (
	"context"
	"os"
	"path"
	"path/filepath"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/magefile/mage/sh"
	"github.com/pkg/errors"
	"github.com/rs/zerolog/log"

	"github.com/mysteriumnetwork/go-ci/env"
	"github.com/mysteriumnetwork/node/logconfig"
)

// Storage wraps the AWS S3 client, configures it for s3.mysterium.network
// and provides convenience methods.
type Storage struct {
	*s3.Client
}

var cacheDir string

const cacheDirPermissions = 0700

func init() {
	home, err := os.UserHomeDir()
	if err != nil {
		log.Err(err).Msg("Failed to determine home directory")
		os.Exit(1)
	}
	// Assign to the package-level cacheDir here; using := would shadow it and
	// leave the package-level variable empty for GetCacheableFile.
	cacheDir = path.Join(home, ".myst-build-cache")
	err = os.Mkdir(cacheDir, cacheDirPermissions)
	if err != nil && !os.IsExist(err) {
		log.Err(err).Msg("Failed to create storage cache directory")
		os.Exit(1)
	}
}

// NewClient returns a Storage client configured to work with the
// https://s3.mysterium.network storage.
func NewClient() (*Storage, error) {
	customResolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
		return aws.Endpoint{
			URL:           "https://s3.mysterium.network",
			SigningRegion: region,
		}, nil
	})

	cfg, err := config.LoadDefaultConfig(
		context.TODO(),
		config.WithRegion("eu-central-1"),
		config.WithEndpointResolver(customResolver),
	)
	if err != nil {
		return nil, err
	}

	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.UsePathStyle = true
	})
	return &Storage{client}, nil
}
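
// A minimal usage sketch (illustrative only, not part of the build targets).
// The bucket name "example-bucket" is an assumption, not taken from this file:
//
//	storage, err := NewClient()
//	if err != nil {
//		// handle the error
//	}
//	objects, err := storage.ListObjects("example-bucket")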

// ListObjects lists objects in the given storage bucket.
func (s *Storage) ListObjects(bucket string) ([]types.Object, error) {
	req, err := s.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return nil, err
	}

	return req.Contents, nil
}
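
// Note: a single ListObjectsV2 call returns at most one page of results
// (up to 1000 keys) and this helper does not paginate, which is presumably
// sufficient for the small CI buckets it targets. A paginated variant could
// look roughly like this sketch (not used by the build):
//
//	paginator := s3.NewListObjectsV2Paginator(s.Client, &s3.ListObjectsV2Input{
//		Bucket: aws.String(bucket),
//	})
//	for paginator.HasMorePages() {
//		page, err := paginator.NextPage(context.TODO())
//		if err != nil {
//			return nil, err
//		}
//		objects = append(objects, page.Contents...)
//	}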

// FindObject finds an object in the storage bucket satisfying the given predicate.
// It returns nil (and no error) when nothing matches.
func (s *Storage) FindObject(bucket string, predicate func(types.Object) bool) (*types.Object, error) {
	objects, err := s.ListObjects(bucket)
	if err != nil {
		return nil, err
	}
	for _, obj := range objects {
		if predicate(obj) {
			return &obj, nil
		}
	}
	return nil, nil
}
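
// A sketch of a typical predicate (illustrative; the artifact name is an
// assumption, not taken from this file):
//
//	object, err := storage.FindObject(bucket, func(obj types.Object) bool {
//		return aws.ToString(obj.Key) == "myst_linux_amd64.deb"
//	})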

// GetCacheableFile finds a file in the storage bucket satisfying the given predicate.
// If a local copy with the same size does not exist, it downloads the file; otherwise it returns the cached copy.
func (s *Storage) GetCacheableFile(bucket string, predicate func(types.Object) bool) (string, error) {
	object, err := s.FindObject(bucket, predicate)
	if err != nil {
		return "", errors.Wrap(err, "could not find file in bucket")
	}
	// FindObject returns nil when nothing matches; guard against dereferencing it.
	if object == nil {
		return "", errors.New("no matching file found in bucket")
	}
	remoteFilename := aws.ToString(object.Key)
	remoteFileSize := object.Size

	localFilename := filepath.Join(cacheDir, remoteFilename)
	localFileInfo, err := os.Stat(localFilename)

	var download bool
	switch {
	case err == nil && localFileInfo.Size() != remoteFileSize:
		log.Info().Msgf(
			"Cached copy found: %s, but size mismatched, expected: %d, found: %d",
			localFilename, remoteFileSize, localFileInfo.Size(),
		)
		download = true
	case err != nil && os.IsNotExist(err):
		log.Info().Msgf("Cached copy not found: %s", localFilename)
		download = true
	case err != nil:
		return "", errors.Wrap(err, "error looking up cached copy")
	}

	if download {
		log.Info().Msg("Downloading file from the bucket")
		file, err := os.OpenFile(localFilename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, cacheDirPermissions)
		if err != nil {
			return "", err
		}
		defer file.Close()

		downloader := manager.NewDownloader(s)

		numBytes, err := downloader.Download(context.TODO(), file, &s3.GetObjectInput{
			Bucket: aws.String(bucket),
			Key:    aws.String(remoteFilename),
		})
		if err != nil {
			return "", err
		}
		log.Info().Msgf("Downloaded file: %s (%d MB)", localFilename, numBytes/1024/1024)
	} else {
		log.Info().Msg("Returning cached copy")
	}

	return localFilename, nil
}
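
// A hedged usage sketch (bucket and key below are assumptions, not taken from
// this file). Note that the cache is validated by object size only, not by
// checksum, so a same-sized replacement would be served from the stale copy:
//
//	localPath, err := storage.GetCacheableFile("example-bucket", func(obj types.Object) bool {
//		return aws.ToString(obj.Key) == "myst_linux_amd64.tar.gz"
//	})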

// MakeBucket creates a bucket in S3 for the current build (env.BuildNumber).
func MakeBucket() error {
	logconfig.Bootstrap()
	url, err := bucketUrlForBuild()
	if err != nil {
		return err
	}
	return sh.RunV("bin/s3", "mb", url)
}

// RemoveBucket removes the build bucket.
func RemoveBucket() error {
	logconfig.Bootstrap()
	url, err := bucketUrlForBuild()
	if err != nil {
		return err
	}
	return sh.RunV("bin/s3", "rb", "--force", url)
}

// UploadArtifacts uploads all build artifacts to the S3 build bucket.
func UploadArtifacts() error {
	url, err := bucketUrlForBuild()
	if err != nil {
		return err
	}
	return Sync("build/package", url+"/build-artifacts")
}

// UploadDockerImages uploads all Docker images to the S3 build bucket.
func UploadDockerImages() error {
	url, err := bucketUrlForBuild()
	if err != nil {
		return err
	}
	return Sync("build/docker-images", url+"/docker-images")
}

// UploadSingleArtifact uploads a single file to the S3 build bucket.
func UploadSingleArtifact(path string) error {
	url, err := bucketUrlForBuild()
	if err != nil {
		return err
	}
	return Copy(path, url+"/build-artifacts/")
}

// DownloadArtifacts downloads all artifacts from the S3 build bucket.
func DownloadArtifacts() error {
	url, err := bucketUrlForBuild()
	if err != nil {
		return err
	}
	return Sync(url+"/build-artifacts", "build/package")
}

// DownloadDockerImages fetches Docker image archives from the S3 build bucket.
func DownloadDockerImages() error {
	url, err := bucketUrlForBuild()
	if err != nil {
		return err
	}
	return Sync(url+"/docker-images", "build/docker-images")
}
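
// A hedged sketch of how these helpers are typically chained in CI (the actual
// orchestration lives in the mage build targets, not in this file):
//
//	if err := MakeBucket(); err != nil { return err }
//	if err := UploadArtifacts(); err != nil { return err }
//	// ... later, possibly on another agent:
//	if err := DownloadArtifacts(); err != nil { return err }
//	if err := RemoveBucket(); err != nil { return err }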

// Sync syncs directories and S3 prefixes: it recursively copies new and
// updated files from the source to the destination using the bin/s3 CLI.
func Sync(source, target string) error {
	if err := sh.RunV("bin/s3", "sync", source, target); err != nil {
		return errors.Wrap(err, "failed to sync artifacts")
	}
	log.Info().Msg("S3 sync successful")
	return nil
}

// Copy copies a local file or S3 object to another location, locally or in S3.
func Copy(source, target string) error {
	if err := sh.RunV("bin/s3", "cp", source, target); err != nil {
		return errors.Wrap(err, "failed to copy artifacts")
	}
	log.Info().Msg("S3 copy successful")
	return nil
}

// bucketUrlForBuild derives the build bucket URL from the CI environment:
// <github owner>-<github repository>-<build number>.
func bucketUrlForBuild() (string, error) {
	if err := env.EnsureEnvVars(env.BuildNumber, env.GithubOwner, env.GithubRepository); err != nil {
		return "", err
	}
	bucket := env.Str(env.GithubOwner) + "-" + env.Str(env.GithubRepository) + "-" + env.Str(env.BuildNumber)
	return "s3://" + bucket, nil
}
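
// For example (values are illustrative), if env.GithubOwner resolves to
// "mysteriumnetwork", env.GithubRepository to "node" and env.BuildNumber to
// "3001", the helper returns "s3://mysteriumnetwork-node-3001", so
// UploadArtifacts would effectively run:
//
//	bin/s3 sync build/package s3://mysteriumnetwork-node-3001/build-artifacts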