github.com/0chain/gosdk@v1.17.11/zboxcore/sdk/chunked_upload_process.go

//go:build !js && !wasm
// +build !js,!wasm

package sdk

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"

	thrown "github.com/0chain/errors"
	"github.com/0chain/gosdk/zboxcore/zboxutil"
)

// createUploadProgress creates a new UploadProgress for the given connection ID.
func (su *ChunkedUpload) createUploadProgress(connectionId string) {
	if su.progress.ChunkSize <= 0 {
		su.progress = UploadProgress{
			ConnectionID:      connectionId,
			ChunkIndex:        -1,
			ChunkSize:         su.chunkSize,
			EncryptOnUpload:   su.encryptOnUpload,
			EncryptedKeyPoint: su.encryptedKeyPoint,
			ActualSize:        su.fileMeta.ActualSize,
			ChunkNumber:       su.chunkNumber,
		}
	}
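	// One status entry, each with its own hasher sized to the shard, is kept
	// per data+parity blobber so every shard's Merkle trees can be built independently.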
	su.progress.Blobbers = make([]*UploadBlobberStatus, su.allocationObj.DataShards+su.allocationObj.ParityShards)

	for i := 0; i < len(su.progress.Blobbers); i++ {
		su.progress.Blobbers[i] = &UploadBlobberStatus{
			Hasher: CreateHasher(su.shardSize),
		}
	}

	su.progress.ID = su.progressID()
	su.saveProgress()
}

// processUpload builds the upload form for the given chunk range and dispatches
// each fragment to its blobber.
func (su *ChunkedUpload) processUpload(chunkStartIndex, chunkEndIndex int,
	fileShards []blobberShards, thumbnailShards blobberShards,
	isFinal bool, uploadLength int64) error {

	// The chunk range has already been uploaded (e.g. when resuming); only
	// replay the data into the hashers so the Merkle trees stay consistent.
	if chunkEndIndex <= su.progress.ChunkIndex {
		// Write data to hashers
		for i, blobberShard := range fileShards {
			hasher := su.blobbers[i].progress.Hasher
			for _, chunkBytes := range blobberShard {
				err := hasher.WriteToFixedMT(chunkBytes)
				if err != nil {
					if su.statusCallback != nil {
						su.statusCallback.Error(su.allocationObj.ID, su.fileMeta.RemotePath, su.opCode, err)
					}
					return err
				}
				err = hasher.WriteToValidationMT(chunkBytes)
				if err != nil {
					if su.statusCallback != nil {
						su.statusCallback.Error(su.allocationObj.ID, su.fileMeta.RemotePath, su.opCode, err)
					}
					return err
				}
			}
		}
		return nil
	}

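	// When this is the final batch, the last buffer of each blobber's form is
	// held back in finalBuffer and only sent after the worker queue has drained;
	// lastBufferOnly marks the case where that held-back buffer is all there is.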
	var (
		errCount       int32
		finalBuffer    []blobberData
		pos            uint64
		wg             sync.WaitGroup
		lastBufferOnly bool
	)
	if isFinal {
		finalBuffer = make([]blobberData, len(su.blobbers))
	}
	blobberUpload := UploadData{
		chunkStartIndex: chunkStartIndex,
		chunkEndIndex:   chunkEndIndex,
		isFinal:         isFinal,
		uploadBody:      make([]blobberData, len(su.blobbers)),
		uploadLength:    uploadLength,
	}

	wgErrors := make(chan error, len(su.blobbers))
	if len(fileShards) == 0 {
		return thrown.New("upload_failed", "Upload failed. No data to upload")
	}

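	// Iterate over the set bits of uploadMask: each bit is one blobber still
	// participating in the upload. TrailingZeros yields the blobber index and
	// the And/Lsh/Not step clears that bit before the next iteration.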
	for i := su.uploadMask; !i.Equals64(0); i = i.And(zboxutil.NewUint128(1).Lsh(pos).Not()) {
		pos = uint64(i.TrailingZeros())
		blobber := su.blobbers[pos]
		blobber.progress.UploadLength += uploadLength

		var thumbnailChunkData []byte

		if len(thumbnailShards) > 0 {
			thumbnailChunkData = thumbnailShards[pos]
		}

		wg.Add(1)
		go func(b *ChunkedUploadBlobber, thumbnailChunkData []byte, pos uint64) {
			defer wg.Done()
			uploadData, err := su.formBuilder.Build(
				&su.fileMeta, b.progress.Hasher, su.progress.ConnectionID,
				su.chunkSize, chunkStartIndex, chunkEndIndex, isFinal, su.encryptedKey, su.progress.EncryptedKeyPoint,
				fileShards[pos], thumbnailChunkData, su.shardSize)
			if err != nil {
				errC := atomic.AddInt32(&errCount, 1)
				if errC > int32(su.allocationObj.ParityShards-1) { // as long as at least dataShards+1 blobbers can process the upload, it can be repaired later
					wgErrors <- err
				}
				return
			}
			if isFinal {
				// Hold back the last buffer of the final batch; it is uploaded
				// separately once all queued uploads have completed.
				finalBuffer[pos] = blobberData{
					dataBuffers:  uploadData.dataBuffers[len(uploadData.dataBuffers)-1:],
					formData:     uploadData.formData,
					contentSlice: uploadData.contentSlice[len(uploadData.contentSlice)-1:],
				}
				if len(uploadData.dataBuffers) == 1 {
					lastBufferOnly = true
					return
				}
				uploadData.dataBuffers = uploadData.dataBuffers[:len(uploadData.dataBuffers)-1]
			}
			blobberUpload.uploadBody[pos] = uploadData
		}(blobber, thumbnailChunkData, pos)
	}

	wg.Wait()
	close(wgErrors)
	fileShards = nil
	// Any error sent on wgErrors means more blobbers failed than parity can
	// absorb; drop the saved progress and abort the upload.
	for err := range wgErrors {
		su.removeProgress()
		return thrown.New("upload_failed", fmt.Sprintf("Upload failed. %s", err))
	}
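	// Queue the prepared bodies on uploadChan for the upload workers, unless the
	// only data left is the held-back final buffer. The select respects context
	// cancellation so a cancelled upload does not block here.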
	if !lastBufferOnly {
		su.uploadWG.Add(1)
		select {
		case <-su.ctx.Done():
			return context.Cause(su.ctx)
		case su.uploadChan <- blobberUpload:
		}
	}

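	// On the final batch: stop accepting new work, wait for in-flight uploads to
	// finish, then send the held-back final buffers directly to the blobbers.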
	if isFinal {
		close(su.uploadChan)
		su.uploadWG.Wait()
		select {
		case <-su.ctx.Done():
			return context.Cause(su.ctx)
		default:
		}
		blobberUpload.uploadBody = finalBuffer
		return su.uploadToBlobbers(blobberUpload)
	}
	return nil
}

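// startProcessor starts su.uploadWorkers goroutines running uploadProcessor
// (defined elsewhere in this package) to consume the queued uploads.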
func (su *ChunkedUpload) startProcessor() {
	for i := 0; i < su.uploadWorkers; i++ {
		go su.uploadProcessor()
	}
}