github.com/pingcap/br@v5.3.0-alpha.0.20220125034240-ec59c7b6ce30+incompatible/pkg/storage/storage.go (about) 1 // Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 2 3 package storage 4 5 import ( 6 "context" 7 "io" 8 "net/http" 9 10 "github.com/pingcap/errors" 11 backuppb "github.com/pingcap/kvproto/pkg/backup" 12 13 berrors "github.com/pingcap/br/pkg/errors" 14 ) 15 16 // Permission represents the permission we need to check in create storage. 17 type Permission string 18 19 const ( 20 // AccessBuckets represents bucket access permission 21 // it replace the origin skip-check-path. 22 AccessBuckets Permission = "AccessBucket" 23 24 // ListObjects represents listObjects permission 25 ListObjects Permission = "ListObjects" 26 // GetObject represents GetObject permission 27 GetObject Permission = "GetObject" 28 // PutObject represents PutObject permission 29 PutObject Permission = "PutObject" 30 ) 31 32 // WalkOption is the option of storage.WalkDir. 33 type WalkOption struct { 34 // walk on SubDir of specify directory 35 SubDir string 36 // ListCount is the number of entries per page. 37 // 38 // In cloud storages such as S3 and GCS, the files listed and sent in pages. 39 // Typically a page contains 1000 files, and if a folder has 3000 descendant 40 // files, one would need 3 requests to retrieve all of them. This parameter 41 // controls this size. Note that both S3 and GCS limits the maximum to 1000. 42 // 43 // Typically you want to leave this field unassigned (zero) to use the 44 // default value (1000) to minimize the number of requests, unless you want 45 // to reduce the possibility of timeout on an extremely slow connection, or 46 // perform testing. 47 ListCount int64 48 } 49 50 // ReadSeekCloser is the interface that groups the basic Read, Seek and Close methods. 51 type ReadSeekCloser interface { 52 io.Reader 53 io.Seeker 54 io.Closer 55 } 56 57 // Uploader upload file with chunks. 58 type Uploader interface { 59 // UploadPart upload part of file data to storage 60 UploadPart(ctx context.Context, data []byte) error 61 // CompleteUpload make the upload data to a complete file 62 CompleteUpload(ctx context.Context) error 63 } 64 65 // Writer is like io.Writer but with Context, create a new writer on top of Uploader with NewUploaderWriter. 66 type Writer interface { 67 // Write writes to buffer and if chunk is filled will upload it 68 Write(ctx context.Context, p []byte) (int, error) 69 // Close writes final chunk and completes the upload 70 Close(ctx context.Context) error 71 } 72 73 // ExternalStorage represents a kind of file system storage. 74 type ExternalStorage interface { 75 // WriteFile writes a complete file to storage, similar to os.WriteFile 76 WriteFile(ctx context.Context, name string, data []byte) error 77 // ReadFile reads a complete file from storage, similar to os.ReadFile 78 ReadFile(ctx context.Context, name string) ([]byte, error) 79 // FileExists return true if file exists 80 FileExists(ctx context.Context, name string) (bool, error) 81 // Open a Reader by file path. path is relative path to storage base path 82 Open(ctx context.Context, path string) (ExternalFileReader, error) 83 // WalkDir traverse all the files in a dir. 84 // 85 // fn is the function called for each regular file visited by WalkDir. 86 // The argument `path` is the file path that can be used in `Open` 87 // function; the argument `size` is the size in byte of the file determined 88 // by path. 89 WalkDir(ctx context.Context, opt *WalkOption, fn func(path string, size int64) error) error 90 91 // URI returns the base path as a URI 92 URI() string 93 94 // Create opens a file writer by path. path is relative path to storage base path 95 Create(ctx context.Context, path string) (ExternalFileWriter, error) 96 } 97 98 // ExternalFileReader represents the streaming external file reader. 99 type ExternalFileReader interface { 100 io.ReadCloser 101 io.Seeker 102 } 103 104 // ExternalFileWriter represents the streaming external file writer. 105 type ExternalFileWriter interface { 106 // Write writes to buffer and if chunk is filled will upload it 107 Write(ctx context.Context, p []byte) (int, error) 108 // Close writes final chunk and completes the upload 109 Close(ctx context.Context) error 110 } 111 112 // ExternalStorageOptions are backend-independent options provided to New. 113 type ExternalStorageOptions struct { 114 // SendCredentials marks whether to send credentials downstream. 115 // 116 // This field should be set to false if the credentials are provided to 117 // downstream via external key managers, e.g. on K8s or cloud provider. 118 SendCredentials bool 119 120 // NoCredentials means that no cloud credentials are supplied to BR 121 NoCredentials bool 122 123 // SkipCheckPath marks whether to skip checking path's existence. 124 // 125 // This should only be set to true in testing, to avoid interacting with the 126 // real world. 127 // When this field is false (i.e. path checking is enabled), the New() 128 // function will ensure the path referred by the backend exists by 129 // recursively creating the folders. This will also throw an error if such 130 // operation is impossible (e.g. when the bucket storing the path is missing). 131 132 // deprecated: use checkPermissions and specify the checkPermission instead. 133 SkipCheckPath bool 134 135 // HTTPClient to use. The created storage may ignore this field if it is not 136 // directly using HTTP (e.g. the local storage). 137 HTTPClient *http.Client 138 139 // CheckPermissions check the given permission in New() function. 140 // make sure we can access the storage correctly before execute tasks. 141 CheckPermissions []Permission 142 } 143 144 // Create creates ExternalStorage. 145 // 146 // Please consider using `New` in the future. 147 func Create(ctx context.Context, backend *backuppb.StorageBackend, sendCreds bool) (ExternalStorage, error) { 148 return New(ctx, backend, &ExternalStorageOptions{ 149 SendCredentials: sendCreds, 150 SkipCheckPath: false, 151 HTTPClient: nil, 152 }) 153 } 154 155 // New creates an ExternalStorage with options. 156 func New(ctx context.Context, backend *backuppb.StorageBackend, opts *ExternalStorageOptions) (ExternalStorage, error) { 157 switch backend := backend.Backend.(type) { 158 case *backuppb.StorageBackend_Local: 159 if backend.Local == nil { 160 return nil, errors.Annotate(berrors.ErrStorageInvalidConfig, "local config not found") 161 } 162 return NewLocalStorage(backend.Local.Path) 163 case *backuppb.StorageBackend_S3: 164 if backend.S3 == nil { 165 return nil, errors.Annotate(berrors.ErrStorageInvalidConfig, "s3 config not found") 166 } 167 return newS3Storage(backend.S3, opts) 168 case *backuppb.StorageBackend_Noop: 169 return newNoopStorage(), nil 170 case *backuppb.StorageBackend_Gcs: 171 if backend.Gcs == nil { 172 return nil, errors.Annotate(berrors.ErrStorageInvalidConfig, "GCS config not found") 173 } 174 return newGCSStorage(ctx, backend.Gcs, opts) 175 default: 176 return nil, errors.Annotatef(berrors.ErrStorageInvalidConfig, "storage %T is not supported yet", backend) 177 } 178 }