github.com/sentienttechnologies/studio-go-runner@v0.0.0-20201118202441-6d21f2ced8ee/internal/runner/s3.go

// Copyright 2018-2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.

package runner

// This file contains the implementation of the storage subsystem used by the
// runner to store and retrieve artifacts from cloud providers or local storage

import (
	"archive/tar"
	"bufio"
	"compress/bzip2"
	"compress/gzip"
	"context"
	"crypto/tls"
	"crypto/x509"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"time"

	humanize "github.com/dustin/go-humanize"
	"github.com/minio/minio-go/pkg/credentials"

	"github.com/minio/minio-go"

	bzip2w "github.com/dsnet/compress/bzip2"

	"github.com/go-stack/stack"
	"github.com/jjeffery/kv" // MIT License
)

var (
	s3CA   = flag.String("s3-ca", "", "Used to specify a PEM file for the CA used in securing the S3/Minio connection")
	s3Cert = flag.String("s3-cert", "", "Used to specify a cert file for securing the S3/Minio connection, must be used together with the s3-key option")
	s3Key  = flag.String("s3-key", "", "Used to specify a key file for securing the S3/Minio connection, must be used together with the s3-cert option")
)
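// As a hedged illustration only, these TLS flags would typically be supplied
// on the runner command line; the binary name and file paths shown here are
// hypothetical:
//
//	runner --s3-ca=/etc/ssl/s3-ca.pem
//	runner --s3-cert=/etc/ssl/s3-client.crt --s3-key=/etc/ssl/s3-client.key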

// StorageImpl is a type that describes the implementation of an S3 storage entity
type StorageImpl int

const (
	// MinioImpl is a minio implementation of an S3 resource
	MinioImpl StorageImpl = iota
	// S3Impl is the reference AWS implementation of an S3 resource
	S3Impl
)

type s3Storage struct {
	storage    StorageImpl
	endpoint   string
	project    string
	bucket     string
	key        string
	client     *minio.Client
	anonClient *minio.Client
}

// NewS3storage is used to initialize a client that will communicate with S3 compatible storage.
//
// S3 configuration will only be respected using the AWS environment variables.
//
func NewS3storage(ctx context.Context, projectID string, creds string, env map[string]string, endpoint string,
	bucket string, key string, validate bool, useSSL bool) (s *s3Storage, err kv.Error) {

	s = &s3Storage{
		storage:  S3Impl,
		endpoint: endpoint,
		project:  projectID,
		bucket:   bucket,
		key:      key,
	}

	access := ""
	secret := ""
	for k, v := range env {
		switch strings.ToUpper(k) {
		case "AWS_ACCESS_KEY_ID", "MINIO_ACCESS_KEY":
			access = v
		case "AWS_SECRET_ACCESS_KEY", "MINIO_SECRET_KEY":
			secret = v
		case "MINIO_TEST_SERVER":
			s.storage = MinioImpl
			if len(s.endpoint) == 0 {
				s.endpoint = v
			}
		}
	}

	// When using official S3 the region will be encoded into the endpoint.  In order to
	// prevent cross region authentication problems we need to extract it and use the minio
	// NewWithOptions function, specifying the region explicitly to reduce lookups; minio does
	// the processing to get a well known DNS name in these cases.
	//
	// For additional information about regions and naming for S3 endpoints please review the following,
	// http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
	//
	region := ""
	if s.storage == S3Impl {
		region = env["AWS_DEFAULT_REGION"]

		if s.endpoint != "s3.amazonaws.com" {
			if (strings.HasPrefix(s.endpoint, "s3-") || strings.HasPrefix(s.endpoint, "s3.")) &&
				strings.HasSuffix(s.endpoint, ".amazonaws.com") {
				region = s.endpoint[3:]
				region = strings.TrimSuffix(region, ".amazonaws.com")
				// Revert to a single well known address for DNS lookups to improve interoperability
				// when running in k8s etc
				s.endpoint = "s3.amazonaws.com"
				useSSL = true
			}
		}

		if len(region) == 0 {
			msg := "the AWS region is missing from the studioML request, and could not be deduced from the endpoint"
			return nil, kv.NewError(msg).With("endpoint", s.endpoint).With("stack", stack.Trace().TrimRuntime())
		}
	}

	// The use of SSL is mandated at this point to ensure that data protections
	// are effective when used by callers
	//
	pemData := []byte{}
	cert := tls.Certificate{}
	var errGo error

	if len(*s3Cert) != 0 || len(*s3Key) != 0 {
		if len(*s3Cert) == 0 || len(*s3Key) == 0 {
			return nil, kv.NewError("the s3-cert and s3-key files must both be specified when either is used")
		}
		if cert, errGo = tls.LoadX509KeyPair(*s3Cert, *s3Key); errGo != nil {
			return nil, kv.Wrap(errGo)
		}
		useSSL = true
	}

	if len(*s3CA) != 0 {
		stat, errGo := os.Stat(*s3CA)
		if errGo != nil {
			return nil, kv.Wrap(errGo, "unable to read a PEM, or Certificate file from disk for S3 security")
		}
		if stat.Size() > 128*1024 {
			return nil, kv.NewError("the PEM, or Certificate file is suspiciously large, too large to be a PEM file")
		}
		if pemData, errGo = ioutil.ReadFile(*s3CA); errGo != nil {
			return nil, kv.Wrap(errGo, "PEM, or Certificate file read failed").With("stack", stack.Trace().TrimRuntime())
		}
		if len(pemData) == 0 {
			return nil, kv.NewError("PEM, or Certificate file was empty, PEM data is needed when the file name is specified")
		}
		useSSL = true
	}

	// Use the BucketLookupPath strategy to avoid DNS lookups for the buckets
	options := minio.Options{
		Creds:        credentials.NewStaticV4(access, secret, ""),
		Secure:       useSSL,
		Region:       region,
		BucketLookup: minio.BucketLookupPath,
	}
	if s.client, errGo = minio.NewWithOptions(s.endpoint, &options); errGo != nil {
		return nil, kv.Wrap(errGo).With("endpoint", s.endpoint, "options", fmt.Sprintf("%+v", options)).With("stack", stack.Trace().TrimRuntime())
	}

	anonOptions := minio.Options{
		// Using empty values appears to be the most appropriate way of obtaining anonymous
		// access, however none of this is documented anywhere that could be found.  It is
		// the only approach that worked without panics from the libraries being used.
		Creds:        credentials.NewStaticV4("", "", ""),
		Secure:       useSSL,
		Region:       region,
		BucketLookup: minio.BucketLookupPath,
	}
	if s.anonClient, errGo = minio.NewWithOptions(s.endpoint, &anonOptions); errGo != nil {
		return nil, kv.Wrap(errGo).With("endpoint", s.endpoint, "options", fmt.Sprintf("%+v", anonOptions)).With("stack", stack.Trace().TrimRuntime())
	}

	if useSSL {
		caCerts := &x509.CertPool{}

		if len(*s3CA) != 0 {
			if !caCerts.AppendCertsFromPEM(pemData) {
				return nil, kv.NewError("PEM Data could not be added to the system default certificate pool").With("stack", stack.Trace().TrimRuntime())
			}
		} else {
			// First load the default CAs
			caCerts, errGo = x509.SystemCertPool()
			if errGo != nil {
				return nil, kv.Wrap(errGo).With("stack", stack.Trace().TrimRuntime())
			}
		}

		s.client.SetCustomTransport(&http.Transport{
			TLSClientConfig: &tls.Config{
				Certificates: []tls.Certificate{cert},
				RootCAs:      caCerts,
			},
		})
		s.anonClient.SetCustomTransport(&http.Transport{
			TLSClientConfig: &tls.Config{
				Certificates: []tls.Certificate{cert},
				RootCAs:      caCerts,
			},
		})
	}

	return s, nil
}
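// As a hedged usage sketch only, construction of a storage client might look
// as follows; every value shown here is hypothetical:
//
//	env := map[string]string{
//		"AWS_ACCESS_KEY_ID":     "minio",
//		"AWS_SECRET_ACCESS_KEY": "minio123",
//		"AWS_DEFAULT_REGION":    "us-west-2",
//	}
//	store, err := NewS3storage(context.Background(), "myProject", "", env,
//		"s3.us-west-2.amazonaws.com", "myBucket", "artifact.tar.gz", true, true)
//	if err != nil {
//		// handle the error
//	}
//	defer store.Close()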

func (s *s3Storage) Close() {
}

// Hash returns a platform specific MD5 of the contents of the file that can be used by caching and other functions
// to track storage changes etc
//
// The hash on AWS S3 is not a plain MD5 but uses multiple hashes from file
// segments to increase the speed of hashing and also to reflect the multipart download
// processing that was used for the file, for a full explanation please see
// https://stackoverflow.com/questions/12186993/what-is-the-algorithm-to-compute-the-amazon-s3-etag-for-a-file-larger-than-5gb
//
func (s *s3Storage) Hash(ctx context.Context, name string) (hash string, err kv.Error) {
	key := name
	if len(key) == 0 {
		key = s.key
	}
	info, errGo := s.client.StatObject(s.bucket, key, minio.StatObjectOptions{})
	if errGo != nil {
		if minio.ToErrorResponse(errGo).Code == "AccessDenied" {
			// Try accessing the artifact without any credentials
			info, errGo = s.anonClient.StatObject(s.bucket, key, minio.StatObjectOptions{})
		}
	}
	if errGo != nil {
		return "", kv.Wrap(errGo).With("bucket", s.bucket).With("key", key).With("stack", stack.Trace().TrimRuntime())
	}
	return info.ETag, nil
}
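// As a hedged sketch of the multipart ETag scheme described above, and not
// code used by the runner itself: for an object uploaded in N parts the ETag
// is the hex MD5 of the concatenated binary MD5 digests of the parts, with a
// "-N" suffix.  A local reproduction, assuming the part size matches the one
// used for the original upload (multipartETag and partSize are hypothetical
// names), might look like:
//
//	func multipartETag(r io.Reader, partSize int64) (etag string, err error) {
//		sums := []byte{}
//		parts := 0
//		for {
//			h := md5.New()
//			n, errGo := io.CopyN(h, r, partSize)
//			if n > 0 {
//				sums = append(sums, h.Sum(nil)...)
//				parts++
//			}
//			if errGo == io.EOF {
//				break
//			}
//			if errGo != nil {
//				return "", errGo
//			}
//		}
//		return fmt.Sprintf("%x-%d", md5.Sum(sums), parts), nil
//	}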

func (s *s3Storage) listObjects(keyPrefix string) (names []string, warnings []kv.Error, err kv.Error) {
	names = []string{}
	isRecursive := true

	// Create a done channel to control the 'ListObjects' go routine.
	doneCh := make(chan struct{})

	// Indicate to our routine to exit cleanly upon return.
	defer close(doneCh)

	// Try the available clients, each holding different credentials, until one of them succeeds
	for _, aClient := range []*minio.Client{s.client, s.anonClient} {
		objectCh := aClient.ListObjects(s.bucket, keyPrefix, isRecursive, doneCh)
		for object := range objectCh {
			if object.Err != nil {
				if minio.ToErrorResponse(object.Err).Code == "AccessDenied" {
					continue
				}
				return nil, nil, kv.Wrap(object.Err).With("bucket", s.bucket, "keyPrefix", keyPrefix).With("stack", stack.Trace().TrimRuntime())
			}
			names = append(names, object.Key)
		}
	}
	return names, nil, err
}

// Gather is used to retrieve files prefixed with a specific key.  It is used to retrieve the individual files
// associated with a previous Hoard operation.
//
func (s *s3Storage) Gather(ctx context.Context, keyPrefix string, outputDir string, tap io.Writer) (warnings []kv.Error, err kv.Error) {
	// Retrieve a list of the known keys that match the key prefix
	names, warnings, err := s.listObjects(keyPrefix)

	// Download these files
	for _, key := range names {
		w, e := s.Fetch(ctx, key, false, outputDir, tap)
		if len(w) != 0 {
			warnings = append(warnings, w...)
		}
		if e != nil {
			err = e
		}
	}
	return warnings, err
}
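// As a hedged usage sketch only, retrieving the files previously uploaded
// under a key prefix might look as follows; the prefix and directory are
// hypothetical:
//
//	warnings, err := store.Gather(ctx, "experiments/42/output/", "/tmp/output", nil)
//	for _, w := range warnings {
//		// log the warning
//	}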

// Fetch is used to retrieve a file from S3 compatible storage and either
// copy it directly into a directory, or unpack the file into the same directory.
//
// Calling this function with output not being a valid directory will result in an error
// being returned.
//
// The tap can be used to make a side copy of the content that is being read.
//
func (s *s3Storage) Fetch(ctx context.Context, name string, unpack bool, output string, tap io.Writer) (warns []kv.Error, err kv.Error) {

	key := name
	if len(key) == 0 {
		key = s.key
	}
	errCtx := kv.With("output", output).With("name", name).
		With("bucket", s.bucket).With("key", key).With("endpoint", s.endpoint)

	// Make sure output is an existing directory
	info, errGo := os.Stat(output)
	if errGo != nil {
		return warns, errCtx.Wrap(errGo).With("stack", stack.Trace().TrimRuntime())
	}
	if !info.IsDir() {
		return warns, errCtx.NewError("a directory was not used, or did not exist").With("stack", stack.Trace().TrimRuntime())
	}

	fileType, w := MimeFromExt(name)
	if w != nil {
		warns = append(warns, w)
	}

	obj, errGo := s.client.GetObjectWithContext(ctx, s.bucket, key, minio.GetObjectOptions{})
	if errGo == nil {
		// Errors can be delayed until the first interaction with the storage platform so
		// we exercise access to the meta data at least to validate the object we have
		_, errGo = obj.Stat()
	}
	if errGo != nil {
		if minio.ToErrorResponse(errGo).Code == "AccessDenied" {
			// Try accessing the artifact without any credentials
			obj, errGo = s.anonClient.GetObjectWithContext(ctx, s.bucket, key, minio.GetObjectOptions{})
			if errGo == nil {
				// Errors can be delayed until the first interaction with the storage platform so
				// we exercise access to the meta data at least to validate the object we have
				_, errGo = obj.Stat()
			}
		}
		if errGo != nil {
			return warns, errCtx.Wrap(errGo).With("stack", stack.Trace().TrimRuntime())
		}
	}
	defer obj.Close()

	// If the unpack flag is set then use a tar decompressor and unpacker
	// but first make sure the output location is an existing directory
	if unpack {

		var inReader io.ReadCloser

		switch fileType {
		case "application/x-gzip", "application/zip":
			if tap != nil {
				// Create a stack of readers that first tees off any data read to a tap,
				// the tap being able to send data to things like caches etc
				//
				// Second in the stack of readers after the tap is a decompression reader
				inReader, errGo = gzip.NewReader(io.TeeReader(obj, tap))
			} else {
				inReader, errGo = gzip.NewReader(obj)
			}
		case "application/bzip2", "application/octet-stream":
			if tap != nil {
				// Create a stack of readers that first tees off any data read to a tap,
				// the tap being able to send data to things like caches etc
				//
				// Second in the stack of readers after the tap is a decompression reader
				inReader = ioutil.NopCloser(bzip2.NewReader(io.TeeReader(obj, tap)))
			} else {
				inReader = ioutil.NopCloser(bzip2.NewReader(obj))
			}
		default:
			if tap != nil {
				// Create a stack of readers that first tees off any data read to a tap,
				// the tap being able to send data to things like caches etc
				inReader = ioutil.NopCloser(io.TeeReader(obj, tap))
			} else {
				inReader = ioutil.NopCloser(obj)
			}
		}
		if errGo != nil {
			return warns, errCtx.Wrap(errGo).With("stack", stack.Trace().TrimRuntime())
		}
		defer inReader.Close()

		// Last in the stack is a tar file handling reader
		tarReader := tar.NewReader(inReader)

		for {
			header, errGo := tarReader.Next()
			if errGo == io.EOF {
				break
			} else if errGo != nil {
				return warns, errCtx.Wrap(errGo).With("fileType", fileType).With("stack", stack.Trace().TrimRuntime())
			}

			path := filepath.Join(output, header.Name)

			if len(header.Linkname) != 0 {
				if errGo = os.Symlink(header.Linkname, path); errGo != nil {
					return warns, errCtx.Wrap(errGo, "symbolic link create failed").With("stack", stack.Trace().TrimRuntime())
				}
				continue
			}

			switch header.Typeflag {
			case tar.TypeDir:
				if info.IsDir() {
					if errGo = os.MkdirAll(path, os.FileMode(header.Mode)); errGo != nil {
						return warns, errCtx.Wrap(errGo).With("stack", stack.Trace().TrimRuntime()).With("path", path)
					}
				}
			case tar.TypeReg, tar.TypeRegA:

				// If the file name included directories then these should be created implicitly
				if parent, errGo := filepath.Abs(path); errGo == nil {
					_ = os.MkdirAll(filepath.Dir(parent), os.ModePerm)
				}

				file, errGo := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, os.FileMode(header.Mode))
				if errGo != nil {
					return warns, errCtx.Wrap(errGo).With("stack", stack.Trace().TrimRuntime()).With("path", path)
				}

				_, errGo = io.Copy(file, tarReader)
				file.Close()
				if errGo != nil {
					return warns, errCtx.Wrap(errGo).With("stack", stack.Trace().TrimRuntime()).With("path", path)
				}
			default:
				errGo = fmt.Errorf("unknown tar archive type '%c'", header.Typeflag)
				return warns, errCtx.Wrap(errGo).With("stack", stack.Trace().TrimRuntime()).With("path", path)
			}
		}
	} else {
		errGo := os.MkdirAll(output, 0700)
		if errGo != nil {
			return warns, errCtx.Wrap(errGo).With("stack", stack.Trace().TrimRuntime()).With("output", output)
		}
		path := filepath.Join(output, filepath.Base(key))
		f, errGo := os.Create(path)
		if errGo != nil {
			return warns, errCtx.Wrap(errGo).With("stack", stack.Trace().TrimRuntime()).With("path", path)
		}
		defer f.Close()

		outf := bufio.NewWriter(f)
		if tap != nil {
			// Create a stack of readers that first tees off any data read to a tap,
			// the tap being able to send data to things like caches etc
			_, errGo = io.Copy(outf, io.TeeReader(obj, tap))
		} else {
			_, errGo = io.Copy(outf, obj)
		}
		if errGo != nil {
			return warns, errCtx.Wrap(errGo).With("stack", stack.Trace().TrimRuntime()).With("path", path)
		}
		outf.Flush()
	}
	return warns, nil
}
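// As a hedged usage sketch only, fetching and unpacking a gzip compressed tar
// archive into an existing directory might look as follows; the key and
// directory are hypothetical:
//
//	warns, err := store.Fetch(ctx, "experiments/42/workspace.tar.gz", true, "/tmp/workspace", nil)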

// uploadFile can be used to transmit a file to the S3 server using a fully qualified file
// name and key
//
func (s *s3Storage) uploadFile(ctx context.Context, src string, dest string) (err kv.Error) {
	if ctx.Err() != nil {
		return kv.NewError("upload context cancelled").With("stack", stack.Trace().TrimRuntime()).With("src", src, "bucket", s.bucket, "key", dest)
	}

	file, errGo := os.Open(filepath.Clean(src))
	if errGo != nil {
		return kv.Wrap(errGo).With("stack", stack.Trace().TrimRuntime()).With("src", src)
	}
	defer file.Close()

	fileStat, errGo := file.Stat()
	if errGo != nil {
		return kv.Wrap(errGo).With("stack", stack.Trace().TrimRuntime()).With("src", src)
	}

	xfered, errGo := s.client.PutObjectWithContext(ctx, s.bucket, dest, file, fileStat.Size(), minio.PutObjectOptions{
		ContentType: "application/octet-stream",
	})
	if errGo != nil {
		return kv.Wrap(errGo).With("stack", stack.Trace().TrimRuntime()).With("src", src, "bucket", s.bucket, "key", dest)
	}
	if xfered != fileStat.Size() {
		shortage := uint64(fileStat.Size() - xfered)

		err := kv.NewError("upload truncated").With("stack", stack.Trace().TrimRuntime())
		return err.With("shortage", humanize.Bytes(shortage), "src", src, "bucket", s.bucket, "key", dest)
	}
	return nil
}

// Hoard is used to upload the contents of a directory to the storage server as individual files rather than as a single
// archive
//
func (s *s3Storage) Hoard(ctx context.Context, srcDir string, keyPrefix string) (warnings []kv.Error, err kv.Error) {

	prefix := keyPrefix
	if len(prefix) == 0 {
		prefix = s.key
	}

	// Walk the files, taking each uploadable file and placing it into a collection
	files := []string{}
	errGo := filepath.Walk(srcDir, func(file string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if fi.IsDir() {
			return nil
		}
		// We have a file, include it in the upload list
		files = append(files, file)

		return nil
	})
	if errGo != nil {
		return nil, kv.Wrap(errGo).With("stack", stack.Trace().TrimRuntime())
	}

	// Upload files
	for _, aFile := range files {
		key := filepath.Join(prefix, strings.TrimPrefix(aFile, srcDir))
		if err = s.uploadFile(ctx, aFile, key); err != nil {
			warnings = append(warnings, err)
		}
	}

	if len(warnings) != 0 {
		err = kv.NewError("one or more uploads failed").With("stack", stack.Trace().TrimRuntime()).With("src", srcDir, "warnings", warnings)
	}

	return warnings, err
}
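// As a hedged usage sketch only, uploading a directory tree as individual
// objects and later retrieving it might look as follows; the directory and
// prefix are hypothetical:
//
//	warnings, err := store.Hoard(ctx, "/tmp/results", "experiments/42/results")
//	...
//	warnings, err = store.Gather(ctx, "experiments/42/results", "/tmp/restored", nil)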

// Deposit is used to return directories as compressed artifacts to the AWS storage for an
// experiment
//
func (s *s3Storage) Deposit(ctx context.Context, src string, dest string) (warns []kv.Error, err kv.Error) {

	if !IsTar(dest) {
		return warns, kv.NewError("uploads must be tar, or tar compressed files").With("stack", stack.Trace().TrimRuntime()).With("key", dest)
	}

	key := dest
	if len(key) == 0 {
		key = s.key
	}

	files, err := NewTarWriter(src)
	if err != nil {
		return warns, err
	}

	if !files.HasFiles() {
		return warns, nil
	}

	pr, pw := io.Pipe()

	swErrorC := make(chan kv.Error)
	go streamingWriter(pr, pw, files, dest, swErrorC)

	s3ErrorC := make(chan kv.Error)
	go s.s3Put(key, pr, s3ErrorC)

	finished := 2
	for {
		select {
		case err = <-swErrorC:
			if nil != err {
				return warns, err
			}
			swErrorC = nil
			finished--
		case err = <-s3ErrorC:
			if nil != err {
				return warns, err
			}
			s3ErrorC = nil
			finished--
		}
		if finished == 0 {
			break
		}
	}

	pr.Close()

	return warns, nil
}

func (s *s3Storage) s3Put(key string, pr *io.PipeReader, errorC chan kv.Error) {

	errS := kv.With("key", key).With("bucket", s.bucket)

	defer func() {
		if r := recover(); r != nil {
			errorC <- errS.NewError(fmt.Sprint(r)).With("stack", stack.Trace().TrimRuntime())
		}
		close(errorC)
	}()
	if _, errGo := s.client.PutObject(s.bucket, key, pr, -1, minio.PutObjectOptions{}); errGo != nil {
		errorC <- errS.Wrap(minio.ToErrorResponse(errGo)).With("stack", stack.Trace().TrimRuntime())
		return
	}
}

type errSender struct {
	errorC chan kv.Error
}

func (es *errSender) send(err kv.Error) {
	if err != nil {
		select {
		case es.errorC <- err:
		case <-time.After(30 * time.Millisecond):
		}
	}
}

func streamingWriter(pr *io.PipeReader, pw *io.PipeWriter, files *TarWriter, dest string, errorC chan kv.Error) {

	sender := errSender{errorC: errorC}

	defer func() {
		if r := recover(); r != nil {
			sender.send(kv.NewError(fmt.Sprint(r)).With("stack", stack.Trace().TrimRuntime()))
		}

		pw.Close()
		close(errorC)
	}()

	typ, w := MimeFromExt(dest)
	sender.send(w)

	switch typ {
	case "application/tar", "application/octet-stream":
		tw := tar.NewWriter(pw)
		if errGo := files.Write(tw); errGo != nil {
			sender.send(errGo)
		}
		tw.Close()
	case "application/bzip2":
		outZ, _ := bzip2w.NewWriter(pw, &bzip2w.WriterConfig{Level: 6})
		tw := tar.NewWriter(outZ)
		if errGo := files.Write(tw); errGo != nil {
			sender.send(errGo)
		}
		tw.Close()
		outZ.Close()
	case "application/x-gzip":
		outZ := gzip.NewWriter(pw)
		tw := tar.NewWriter(outZ)
		if errGo := files.Write(tw); errGo != nil {
			sender.send(errGo)
		}
		tw.Close()
		outZ.Close()
	case "application/zip":
		sender.send(kv.NewError("only tar archives are supported").With("stack", stack.Trace().TrimRuntime()).With("key", dest))
		return
	default:
		sender.send(kv.NewError("unrecognized upload compression").With("stack", stack.Trace().TrimRuntime()).With("key", dest))
		return
	}
}
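// As a hedged end-to-end sketch only, combining the upload and download paths
// above; every name shown here is hypothetical:
//
//	if _, err := store.Deposit(ctx, "/tmp/results", "experiments/42/results.tar.gz"); err != nil {
//		// handle the error
//	}
//	warns, err := store.Fetch(ctx, "experiments/42/results.tar.gz", true, "/tmp/restored", nil)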