github.com/0chain/gosdk@v1.17.11/zboxcore/sdk/chunked_upload.go

package sdk

import (
	"context"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	thrown "github.com/0chain/errors"
	"github.com/0chain/gosdk/constants"
	"github.com/0chain/gosdk/core/common"
	coreEncryption "github.com/0chain/gosdk/core/encryption"
	"github.com/0chain/gosdk/core/sys"
	"github.com/0chain/gosdk/core/util"
	"github.com/0chain/gosdk/zboxcore/allocationchange"
	"github.com/0chain/gosdk/zboxcore/blockchain"
	"github.com/0chain/gosdk/zboxcore/client"
	"github.com/0chain/gosdk/zboxcore/encryption"
	"github.com/0chain/gosdk/zboxcore/fileref"
	"github.com/0chain/gosdk/zboxcore/logger"
	"github.com/0chain/gosdk/zboxcore/zboxutil"
	"github.com/google/uuid"
	"github.com/klauspost/reedsolomon"
)

const (
	DefaultUploadTimeOut = 180 * time.Second
)

var (
	CmdFFmpeg = "ffmpeg"
	// DefaultHashFunc default hash method for the stream merkle tree
	DefaultHashFunc = func(left, right string) string {
		return coreEncryption.Hash(left + right)
	}

	ErrInvalidChunkSize              = errors.New("chunk: chunk size is too small. it must be greater than 272 if the file is uploaded with encryption")
	ErrNoEnoughSpaceLeftInAllocation = errors.New("alloc: not enough space left in allocation")
	CancelOpCtx                      = make(map[string]context.CancelCauseFunc)
	cancelLock                       sync.Mutex
	CurrentMode                      = UploadModeMedium
	shouldSaveProgress               = true
	HighModeWorkers                  = 4
)

// DefaultChunkSize default chunk size for file and thumbnail
const DefaultChunkSize = 64 * 1024

const (
	// EncryptedDataPaddingSize additional bytes to save encrypted data
	EncryptedDataPaddingSize = 16
	// EncryptionHeaderSize encryption header size in chunk: PRE.MessageChecksum(128)+PRE.OverallChecksum(128)
	EncryptionHeaderSize = 128 + 128
	// ReEncryptionHeaderSize re-encryption header size in chunk
	ReEncryptionHeaderSize = 256
)

type UploadMode byte

const (
	UploadModeLow UploadMode = iota
	UploadModeMedium
	UploadModeHigh
)

// SetUploadMode sets the global concurrency mode used by subsequent uploads.
func SetUploadMode(mode UploadMode) {
	CurrentMode = mode
}

// SetHighModeWorkers sets the number of upload workers used in UploadModeHigh.
func SetHighModeWorkers(workers int) {
	HighModeWorkers = workers
}
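// exampleTuneUploadConcurrency is a minimal usage sketch, not part of the
// original file. It shows the intended call order: pick the mode first, then
// size the high-mode worker pool. The worker count of 8 is a hypothetical
// value chosen for illustration only.
func exampleTuneUploadConcurrency() {
	SetUploadMode(UploadModeHigh) // high mode uses HighModeWorkers goroutines
	SetHighModeWorkers(8)         // hypothetical value; the default is 4
}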
/*
CreateChunkedUpload creates a ChunkedUpload instance.

Callers should be careful with the fileReader parameter:
io.ErrUnexpectedEOF might mean that the source has been completely exhausted, or that
some error prevented the source from filling the buffer. Due to this ambiguity, it is the
caller's responsibility to provide an io.Reader that returns io.EOF once the source has
been fully read. For example:

	func newReader(source io.Reader) *EReader {
		return &EReader{source}
	}

	type EReader struct {
		io.Reader
	}

	func (r *EReader) Read(p []byte) (n int, err error) {
		if n, err = io.ReadAtLeast(r.Reader, p, len(p)); err != nil {
			if errors.Is(err, io.ErrUnexpectedEOF) {
				return n, io.EOF
			}
		}
		return
	}
*/
func CreateChunkedUpload(
	ctx context.Context,
	workdir string, allocationObj *Allocation,
	fileMeta FileMeta, fileReader io.Reader,
	isUpdate, isRepair bool,
	webStreaming bool, connectionId string,
	opts ...ChunkedUploadOption,
) (*ChunkedUpload, error) {
	if allocationObj == nil {
		return nil, thrown.Throw(constants.ErrInvalidParameter, "allocationObj")
	}

	if !isUpdate && !allocationObj.CanUpload() || isUpdate && !allocationObj.CanUpdate() {
		return nil, thrown.Throw(constants.ErrFileOptionNotPermitted, "file_option_not_permitted")
	}

	if webStreaming {
		newFileReader, newFileMeta, f, err := TranscodeWebStreaming(workdir, fileReader, fileMeta)
		defer os.Remove(f)

		if err != nil {
			return nil, thrown.New("upload_failed", err.Error())
		}
		fileMeta = *newFileMeta
		fileReader = newFileReader
	}

	err := ValidateRemoteFileName(fileMeta.RemoteName)
	if err != nil {
		return nil, err
	}

	opCode := OpUpload
	if isUpdate {
		opCode = OpUpdate
	}

	consensus := Consensus{
		RWMutex:         &sync.RWMutex{},
		consensusThresh: allocationObj.consensusThreshold,
		fullconsensus:   allocationObj.fullconsensus,
	}

	uploadMask := zboxutil.NewUint128(1).Lsh(uint64(len(allocationObj.Blobbers))).Sub64(1)
	if isRepair {
		opCode = OpUpdate
		consensus.fullconsensus = uploadMask.CountOnes()
		consensus.consensusThresh = 1
	}

	su := &ChunkedUpload{
		allocationObj: allocationObj,
		client:        zboxutil.Client,
		fileMeta:      fileMeta,
		fileReader:    fileReader,

		uploadMask:      uploadMask,
		chunkSize:       DefaultChunkSize,
		chunkNumber:     100,
		encryptOnUpload: false,
		webStreaming:    false,

		consensus:     consensus, //nolint
		uploadTimeOut: DefaultUploadTimeOut,
		commitTimeOut: DefaultUploadTimeOut,
		maskMu:        &sync.Mutex{},
		opCode:        opCode,
	}

	su.ctx, su.ctxCncl = context.WithCancelCause(ctx)

	if isUpdate {
		su.httpMethod = http.MethodPut
		su.buildChange = func(ref *fileref.FileRef, _ uuid.UUID, ts common.Timestamp) allocationchange.AllocationChange {
			change := &allocationchange.UpdateFileChange{}
			change.NewFile = ref
			change.NumBlocks = ref.NumBlocks
			change.Operation = constants.FileOperationUpdate
			change.Size = ref.Size
			return change
		}
	} else {
		su.httpMethod = http.MethodPost
		su.buildChange = func(ref *fileref.FileRef, uid uuid.UUID, ts common.Timestamp) allocationchange.AllocationChange {
			change := &allocationchange.NewFileChange{}
			change.File = ref
			change.NumBlocks = ref.NumBlocks
			change.Operation = constants.FileOperationInsert
			change.Size = ref.Size
			change.Uuid = uid
			return change
		}
	}

	su.workdir = filepath.Join(workdir, ".zcn")

	// create the upload folder to save progress
	err = sys.Files.MkdirAll(filepath.Join(su.workdir, "upload"), 0766)
	if err != nil {
		return nil, err
	}

	for _, opt := range opts {
		opt(su)
	}

	if su.progressStorer == nil && shouldSaveProgress {
		su.progressStorer = createFsChunkedUploadProgress(su.ctx)
	}

	su.loadProgress()
	su.shardSize = getShardSize(su.fileMeta.ActualSize, su.allocationObj.DataShards, su.encryptOnUpload)
	if su.fileHasher == nil {
		su.fileHasher = CreateFileHasher()
	}

	// if the encrypt option, chunk size, chunk number, or actual size has
	// changed, restart the upload from scratch
	if su.progress.ChunkSize != su.chunkSize || su.progress.EncryptOnUpload != su.encryptOnUpload || su.progress.ActualSize != su.fileMeta.ActualSize || su.progress.ChunkNumber != su.chunkNumber || su.progress.ConnectionID == "" {
		su.progress.ChunkSize = 0 // reset chunk size
	}

	su.createUploadProgress(connectionId)

	su.fileErasureEncoder, err = reedsolomon.New(
		su.allocationObj.DataShards,
		su.allocationObj.ParityShards,
		reedsolomon.WithAutoGoroutines(int(su.chunkSize)),
	)
	if err != nil {
		return nil, err
	}

	if su.encryptOnUpload {
		su.fileEncscheme = su.createEncscheme()
		if su.fileEncscheme == nil {
			return nil, thrown.New("upload_failed", "Failed to create encryption scheme")
		}
		if su.chunkSize <= EncryptionHeaderSize+EncryptedDataPaddingSize {
			return nil, ErrInvalidChunkSize
		}
	}

	su.writeMarkerMutex, err = CreateWriteMarkerMutex(client.GetClient(), su.allocationObj)
	if err != nil {
		return nil, err
	}

	blobbers := su.allocationObj.Blobbers
	if len(blobbers) == 0 {
		return nil, thrown.New("no_blobbers", "Unable to find blobbers")
	}

	su.blobbers = make([]*ChunkedUploadBlobber, len(blobbers))

	for i := 0; i < len(blobbers); i++ {
		su.blobbers[i] = &ChunkedUploadBlobber{
			writeMarkerMutex: su.writeMarkerMutex,
			progress:         su.progress.Blobbers[i],
			blobber:          su.allocationObj.Blobbers[i],
			fileRef: &fileref.FileRef{
				Ref: fileref.Ref{
					Name:         su.fileMeta.RemoteName,
					Path:         su.fileMeta.RemotePath,
					Type:         fileref.FILE,
					AllocationID: su.allocationObj.ID,
				},
			},
		}
	}

	cReader, err := createChunkReader(su.fileReader, fileMeta.ActualSize, int64(su.chunkSize), su.allocationObj.DataShards, su.allocationObj.ParityShards, su.encryptOnUpload, su.uploadMask, su.fileErasureEncoder, su.fileEncscheme, su.fileHasher, su.chunkNumber)
	if err != nil {
		return nil, err
	}

	su.chunkReader = cReader
	su.formBuilder = CreateChunkedUploadFormBuilder()
	su.isRepair = isRepair

	uploadWorker, uploadRequest := calculateWorkersAndRequests(su.allocationObj.DataShards, len(su.blobbers), su.chunkNumber)
	su.uploadChan = make(chan UploadData, uploadRequest)
	su.uploadWorkers = uploadWorker
	return su, nil
}
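// exampleUpload is a hypothetical sketch, not part of the original file. It
// shows the intended call sequence for a fresh (non-update, non-repair,
// non-webstreaming) upload: build a FileMeta, create the ChunkedUpload, then
// Start it. The FileMeta field set shown here and the caller-supplied
// connection id are assumptions for illustration; a real caller should also
// wrap the reader so it returns io.EOF, as described in the doc comment above.
func exampleUpload(ctx context.Context, alloc *Allocation, localPath, connID string) error {
	f, err := os.Open(localPath)
	if err != nil {
		return err
	}
	defer f.Close()

	info, err := f.Stat()
	if err != nil {
		return err
	}

	meta := FileMeta{
		ActualSize: info.Size(),
		RemoteName: filepath.Base(localPath),
		RemotePath: "/" + filepath.Base(localPath),
	}

	cu, err := CreateChunkedUpload(ctx, os.TempDir(), alloc, meta, f, false, false, false, connID)
	if err != nil {
		return err
	}
	return cu.Start()
}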
// calculateWorkersAndRequests derives the number of upload workers and the
// size of the upload request buffer from the allocation shape and the current
// upload mode.
func calculateWorkersAndRequests(dataShards, totalShards, chunknumber int) (uploadWorkers int, uploadRequests int) {
	if totalShards < 4 {
		uploadWorkers = 4
	} else {
		switch CurrentMode {
		case UploadModeLow:
			uploadWorkers = 1
		case UploadModeMedium:
			uploadWorkers = 2
		case UploadModeHigh:
			uploadWorkers = HighModeWorkers
		}
	}

	if chunknumber*dataShards < 640 && !IsWasm {
		uploadRequests = 4
	} else {
		uploadRequests = 2
	}
	return
}
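// exampleWorkerSizing is a hypothetical sketch, not part of the original
// file, working through calculateWorkersAndRequests for a common 4+2
// allocation with the default 100-chunk batches: 6 total shards >= 4, so the
// mode table applies (2 workers in UploadModeMedium), and
// chunknumber*dataShards = 400 < 640, so 4 buffered upload requests on
// non-wasm builds.
func exampleWorkerSizing() (workers, requests int) {
	return calculateWorkersAndRequests(4, 6, 100)
}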
// progressID builds the local progress id in the
// [allocationid]_[Hash(LocalPath+"_"+RemotePath)]_[RemoteName] format
func (su *ChunkedUpload) progressID() string {
	if len(su.allocationObj.ID) > 8 {
		return filepath.Join(su.workdir, "upload", "u"+su.allocationObj.ID[:8]+"_"+su.fileMeta.FileID())
	}

	return filepath.Join(su.workdir, "upload", su.allocationObj.ID+"_"+su.fileMeta.FileID())
}

// loadProgress loads progress from ~/.zcn/upload/[progressID]
func (su *ChunkedUpload) loadProgress() {
	// ChunkIndex starts at 0, so the default value should be -1
	su.progress.ChunkIndex = -1

	progressID := su.progressID()
	if shouldSaveProgress {
		progress := su.progressStorer.Load(progressID)

		if progress != nil {
			su.progress = *progress
			su.progress.ID = progressID
		}
	}
}

// saveProgress saves progress to ~/.zcn/upload/[progressID]
func (su *ChunkedUpload) saveProgress() {
	if su.progressStorer != nil {
		su.progressStorer.Save(su.progress)
	}
}

// removeProgress removes the progress info once the upload is done
func (su *ChunkedUpload) removeProgress() {
	if su.progressStorer != nil {
		su.progressStorer.Remove(su.progress.ID) //nolint
	}
}

func (su *ChunkedUpload) updateProgress(chunkIndex int, upMask zboxutil.Uint128) {
	if su.progressStorer != nil {
		if chunkIndex > su.progress.ChunkIndex {
			su.progressStorer.Update(su.progress.ID, chunkIndex, upMask)
		}
	}
}

// createEncscheme initializes the encryption scheme, reusing the private key
// and encrypted key point from saved progress when available.
func (su *ChunkedUpload) createEncscheme() encryption.EncryptionScheme {
	encscheme := encryption.NewEncryptionScheme()

	if len(su.progress.EncryptPrivateKey) > 0 {
		privateKey, _ := hex.DecodeString(su.progress.EncryptPrivateKey)

		err := encscheme.InitializeWithPrivateKey(privateKey)
		if err != nil {
			return nil
		}
	} else {
		mnemonic := client.GetClient().Mnemonic
		if mnemonic == "" {
			return nil
		}
		privateKey, err := encscheme.Initialize(mnemonic)
		if err != nil {
			return nil
		}

		su.progress.EncryptPrivateKey = hex.EncodeToString(privateKey)
	}
	if len(su.progress.EncryptedKeyPoint) > 0 {
		err := encscheme.InitForEncryptionWithPoint("filetype:audio", su.progress.EncryptedKeyPoint)
		if err != nil {
			return nil
		}
	} else {
		encscheme.InitForEncryption("filetype:audio")
		su.progress.EncryptedKeyPoint = encscheme.GetEncryptedKeyPoint()
	}
	su.encryptedKey = encscheme.GetEncryptedKey()
	return encscheme
}
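// exampleUsableChunkPayload is a hypothetical sketch, not part of the
// original file. With encryption enabled, each 64 KiB chunk loses the
// 256-byte PRE header plus 16 bytes of padding, leaving 65264 bytes of
// plaintext per chunk. This is also why ErrInvalidChunkSize rejects chunk
// sizes <= 272 when encryption is on.
func exampleUsableChunkPayload() int64 {
	return DefaultChunkSize - (EncryptionHeaderSize + EncryptedDataPaddingSize) // 65536 - 272 = 65264
}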
func (su *ChunkedUpload) process() error {
	if su.statusCallback != nil {
		su.statusCallback.Started(su.allocationObj.ID, su.fileMeta.RemotePath, su.opCode, int(su.fileMeta.ActualSize)+int(su.fileMeta.ActualThumbnailSize))
	}
	su.startProcessor()
	defer su.chunkReader.Close()
	defer su.ctxCncl(nil)
	for {
		chunks, err := su.readChunks(su.chunkNumber)
		if err != nil {
			if su.statusCallback != nil {
				su.statusCallback.Error(su.allocationObj.ID, su.fileMeta.RemotePath, su.opCode, err)
			}
			return err
		}

		su.shardUploadedSize += chunks.totalFragmentSize
		su.progress.ReadLength += chunks.totalReadSize

		if chunks.isFinal {
			if su.fileMeta.ActualHash == "" {
				su.fileMeta.ActualHash, err = su.chunkReader.GetFileHash()
				if err != nil {
					if su.statusCallback != nil {
						su.statusCallback.Error(su.allocationObj.ID, su.fileMeta.RemotePath, su.opCode, err)
					}
					return err
				}
			}
			if su.fileMeta.ActualSize == 0 {
				su.fileMeta.ActualSize = su.progress.ReadLength
				su.shardSize = getShardSize(su.fileMeta.ActualSize, su.allocationObj.DataShards, su.encryptOnUpload)
			} else if su.fileMeta.ActualSize != su.progress.ReadLength && su.thumbnailBytes == nil {
				err = thrown.New("upload_failed", fmt.Sprintf("Upload failed. Uploaded size does not match actual size: %d != %d", su.fileMeta.ActualSize, su.progress.ReadLength))
				if su.statusCallback != nil {
					su.statusCallback.Error(su.allocationObj.ID, su.fileMeta.RemotePath, su.opCode, err)
				}
				return err
			}
		}

		err = su.processUpload(
			chunks.chunkStartIndex, chunks.chunkEndIndex,
			chunks.fileShards, chunks.thumbnailShards,
			chunks.isFinal, chunks.totalReadSize,
		)
		if err != nil {
			if su.statusCallback != nil {
				su.statusCallback.Error(su.allocationObj.ID, su.fileMeta.RemotePath, su.opCode, err)
			}
			return err
		}

		// the last chunk might be empty with io.EOF
		// https://stackoverflow.com/questions/41208359/how-to-test-eof-on-io-reader-in-go
		if chunks.isFinal {
			break
		}
	}
	return nil
}

// Start starts or resumes the upload
func (su *ChunkedUpload) Start() error {
	now := time.Now()

	err := su.process()
	if err != nil {
		return err
	}
	su.ctx, su.ctxCncl = context.WithCancelCause(su.allocationObj.ctx)
	defer su.ctxCncl(nil)
	elapsedProcess := time.Since(now)

	blobbers := make([]*blockchain.StorageNode, len(su.blobbers))
	for i, b := range su.blobbers {
		blobbers[i] = b.blobber
	}
	if su.addConsensus == int32(su.consensus.fullconsensus) {
		return thrown.New("upload_failed", "Duplicate upload detected")
	}

	err = su.writeMarkerMutex.Lock(
		su.ctx, &su.uploadMask, su.maskMu,
		blobbers, &su.consensus, int(su.addConsensus), su.uploadTimeOut,
		su.progress.ConnectionID)
	if err != nil {
		if su.statusCallback != nil {
			su.statusCallback.Error(su.allocationObj.ID, su.fileMeta.RemotePath, su.opCode, err)
		}
		return err
	}
	elapsedLock := time.Since(now) - elapsedProcess

	defer su.writeMarkerMutex.Unlock(
		su.ctx, su.uploadMask, blobbers, su.uploadTimeOut, su.progress.ConnectionID) //nolint: errcheck

	defer func() {
		elapsedProcessCommit := time.Since(now) - elapsedProcess - elapsedLock
		logger.Logger.Info("[ChunkedUpload - start] Timings:\n",
			fmt.Sprintf("allocation_id: %s", su.allocationObj.ID),
			fmt.Sprintf("process: %d ms", elapsedProcess.Milliseconds()),
			fmt.Sprintf("Lock: %d ms", elapsedLock.Milliseconds()),
			fmt.Sprintf("processCommit: %d ms", elapsedProcessCommit.Milliseconds()))
	}()
	return su.processCommit()
}
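// exampleResumeUpload is a hypothetical sketch, not part of the original
// file. Because progress is persisted per progressID, resuming after a failed
// Start means rebuilding the ChunkedUpload with the same workdir, allocation,
// and FileMeta and calling Start again; loadProgress then picks up the saved
// state and continues from the last confirmed chunk. The reopen callback and
// the reuse of the connection id are assumptions for illustration.
func exampleResumeUpload(ctx context.Context, alloc *Allocation, workdir string, meta FileMeta, reopen func() (io.Reader, error), connID string) error {
	src, err := reopen() // a fresh reader positioned at the start of the source
	if err != nil {
		return err
	}
	cu, err := CreateChunkedUpload(ctx, workdir, alloc, meta, src, false, false, false, connID)
	if err != nil {
		return err
	}
	return cu.Start()
}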
func (su *ChunkedUpload) readChunks(num int) (*batchChunksData, error) {
	data := &batchChunksData{
		chunkStartIndex: -1,
		chunkEndIndex:   -1,
	}

	for i := 0; i < num; i++ {
		chunk, err := su.chunkReader.Next()
		if err != nil {
			return nil, err
		}

		if i == 0 {
			data.chunkStartIndex = chunk.Index
		}
		data.chunkEndIndex = chunk.Index

		data.totalFragmentSize += chunk.FragmentSize
		data.totalReadSize += chunk.ReadSize

		// upload the entire thumbnail in the first chunk request only
		if chunk.Index == 0 && len(su.thumbnailBytes) > 0 {
			data.thumbnailShards, err = su.chunkReader.Read(su.thumbnailBytes)
			if err != nil {
				return nil, err
			}
		}

		if data.fileShards == nil {
			data.fileShards = make([]blobberShards, len(chunk.Fragments))
		}

		// concatenate each blobber's fragments
		if chunk.ReadSize > 0 {
			for i, v := range chunk.Fragments {
				// fragment for blobber i
				data.fileShards[i] = append(data.fileShards[i], v)
			}
		}

		if chunk.IsFinal {
			data.isFinal = true
			break
		}
	}
	su.chunkReader.Reset()
	return data, nil
}

// processCommit commits the shard upload on each blobber
func (su *ChunkedUpload) processCommit() error {
	defer su.removeProgress()

	logger.Logger.Info("Submitting for commit")
	su.consensus.Reset()
	su.consensus.consensus = int(su.addConsensus)
	wg := &sync.WaitGroup{}
	var pos uint64
	uid := util.GetNewUUID()
	timestamp := common.Now()
	for i := su.uploadMask; !i.Equals64(0); i = i.And(zboxutil.NewUint128(1).Lsh(pos).Not()) {
		pos = uint64(i.TrailingZeros())

		blobber := su.blobbers[pos]

		// fixed numBlocks
		blobber.fileRef.ChunkSize = su.chunkSize
		blobber.fileRef.NumBlocks = int64(su.progress.ChunkIndex + 1)

		blobber.commitChanges = append(blobber.commitChanges,
			su.buildChange(blobber.fileRef, uid, timestamp))

		wg.Add(1)
		go func(b *ChunkedUploadBlobber, pos uint64) {
			defer wg.Done()
			err := b.processCommit(context.TODO(), su, pos, int64(timestamp))
			if err != nil {
				b.commitResult = ErrorCommitResult(err.Error())
			}
		}(blobber, pos)
	}

	wg.Wait()

	if !su.consensus.isConsensusOk() {
		consensus := su.consensus.getConsensus()
		err := thrown.New("consensus_not_met",
			fmt.Sprintf("Upload commit failed. Required consensus of at least %d, got %d",
				su.consensus.consensusThresh, consensus))

		if su.statusCallback != nil {
			su.statusCallback.Error(su.allocationObj.ID, su.fileMeta.RemotePath, su.opCode, err)
		}
		return err
	}

	if su.statusCallback != nil {
		su.statusCallback.Completed(su.allocationObj.ID, su.fileMeta.RemotePath, su.fileMeta.RemoteName, su.fileMeta.MimeType, int(su.progress.UploadLength), su.opCode)
	}

	return nil
}
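// exampleMaskIteration is a hypothetical sketch, not part of the original
// file, isolating the bitmask loop used by processCommit above and
// uploadToBlobbers below: visit each blobber whose bit is set in the mask,
// clearing the lowest set bit after each iteration.
func exampleMaskIteration(mask zboxutil.Uint128, visit func(pos uint64)) {
	var pos uint64
	for i := mask; !i.Equals64(0); i = i.And(zboxutil.NewUint128(1).Lsh(pos).Not()) {
		pos = uint64(i.TrailingZeros()) // index of the lowest set bit = blobber position
		visit(pos)
	}
}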
// getShardSize returns the size of a file's data that each blobber receives.
func getShardSize(dataSize int64, dataShards int, isEncrypted bool) int64 {
	if dataSize == 0 {
		return 0
	}
	chunkSize := int64(DefaultChunkSize)
	if isEncrypted {
		chunkSize -= (EncryptedDataPaddingSize + EncryptionHeaderSize)
	}

	totalChunkSize := chunkSize * int64(dataShards)

	n := dataSize / totalChunkSize
	r := dataSize % totalChunkSize

	var remainderShards int64
	if isEncrypted {
		remainderShards = (r+int64(dataShards)-1)/int64(dataShards) + EncryptedDataPaddingSize + EncryptionHeaderSize
	} else {
		remainderShards = (r + int64(dataShards) - 1) / int64(dataShards)
	}
	return n*DefaultChunkSize + remainderShards
}

func (su *ChunkedUpload) uploadProcessor() {
	for {
		select {
		case <-su.ctx.Done():
			return
		case uploadData, ok := <-su.uploadChan:
			if !ok {
				return
			}
			su.uploadToBlobbers(uploadData) //nolint:errcheck
			su.uploadWG.Done()
		}
	}
}

func (su *ChunkedUpload) uploadToBlobbers(uploadData UploadData) error {
	select {
	case <-su.ctx.Done():
		return context.Cause(su.ctx)
	default:
	}
	consensus := Consensus{
		RWMutex:         &sync.RWMutex{},
		consensusThresh: su.consensus.consensusThresh,
		fullconsensus:   su.consensus.fullconsensus,
	}

	wgErrors := make(chan error, len(su.blobbers))
	ctx, cancel := context.WithCancel(su.ctx)
	defer cancel()
	var pos uint64
	var errCount int32
	var wg sync.WaitGroup
	for i := su.uploadMask; !i.Equals64(0); i = i.And(zboxutil.NewUint128(1).Lsh(pos).Not()) {
		pos = uint64(i.TrailingZeros())
		wg.Add(1)
		go func(pos uint64) {
			defer wg.Done()
			err := su.blobbers[pos].sendUploadRequest(ctx, su, uploadData.isFinal, su.encryptedKey, uploadData.uploadBody[pos].dataBuffers, uploadData.uploadBody[pos].formData, uploadData.uploadBody[pos].contentSlice, pos, &consensus)
			if err != nil {
				if strings.Contains(err.Error(), "duplicate") {
					su.consensus.Done()
					errC := atomic.AddInt32(&su.addConsensus, 1)
					if errC >= int32(su.consensus.consensusThresh) {
						wgErrors <- err
					}
					return
				}
				logger.Logger.Error("error during sendUploadRequest", err, " connectionID: ", su.progress.ConnectionID)
				errC := atomic.AddInt32(&errCount, 1)
				if errC > int32(su.allocationObj.ParityShards-1) { // if at least dataShards+1 blobbers can process the upload, it can be repaired later
					wgErrors <- err
				}
			}
		}(pos)
	}
	wg.Wait()
	close(wgErrors)
	for err := range wgErrors {
		su.ctxCncl(thrown.New("upload_failed", fmt.Sprintf("Upload failed. %s", err)))
		return err
	}
	if !consensus.isConsensusOk() {
		err := thrown.New("consensus_not_met", fmt.Sprintf("Upload failed for path %s. Required consensus of at least %d, got %d",
			su.fileMeta.RemotePath, consensus.consensusThresh, consensus.getConsensus()))
		su.ctxCncl(err)
		return err
	}
	if uploadData.uploadLength > 0 {
		index := uploadData.chunkEndIndex
		uploadLength := uploadData.uploadLength
		go su.updateProgress(index, su.uploadMask)
		if su.statusCallback != nil {
			su.statusCallback.InProgress(su.allocationObj.ID, su.fileMeta.RemotePath, su.opCode, int(atomic.AddInt64(&su.progress.UploadLength, uploadLength)), nil)
		}
	}
	uploadData = UploadData{} // release memory
	return nil
}
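// exampleShardSize is a hypothetical sketch, not part of the original file,
// checking the getShardSize arithmetic: a 1 MiB file on 4 data shards without
// encryption has totalChunkSize = 64Ki*4 = 256Ki, so n = 4 full chunk rows
// and r = 0, giving 4*65536 + 0 = 262144 bytes per blobber.
func exampleShardSize() int64 {
	return getShardSize(1<<20, 4, false) // 262144
}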