github.com/0chain/gosdk@v1.17.11/zboxcore/sdk/reader.go

package sdk

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"sync"

	"github.com/0chain/errors"
	"github.com/0chain/gosdk/zboxcore/marker"
	"github.com/0chain/gosdk/zboxcore/zboxutil"
)

const (
	// EncryptionOverHead is the amount by which each block grows after
	// encryption: the ciphertext adds 16 bytes, and the two checksums
	// (MessageChecksum and OverallChecksum) are 128 bytes each.
	// Total overhead per encrypted block: 16 + 128*2 = 272 bytes.
	EncryptionOverHead = 272
	ChecksumSize       = 256
	HeaderSize         = 128
	BlockSize          = 64 * KB
)
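
// Illustrative note (not part of the SDK API): for an encrypted file the
// usable plaintext per block shrinks by EncryptionOverHead, as applied in
// GetDStorageFileReader below.
//
//	effective := BlockSize - EncryptionOverHead // 65536 - 272 = 65264 bytes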

// error codes
const (
	NotEnoughTokens              = "not_enough_tokens"
	InvalidAuthTicket            = "invalid_authticket"
	InvalidShare                 = "invalid_share"
	InvalidRead                  = "invalid_read"
	ExceededMaxOffsetValue       = "exceeded_max_offset_value"
	NegativeOffsetResultantValue = "negative_offset_resultant_value"
	InvalidWhenceValue           = "invalid_whence_value"
)

// errors
var ErrInvalidRead = errors.New(InvalidRead, "want_size is <= 0")

const (
	// BlocksFor10MB is the number of blocks required to make up 10MB of data,
	// calculated as 10MB / 64KB = 160.
	// If a blobber cannot respond with 10MB of data, the client can use the
	// numBlocks field in the StreamDownload struct instead.
	BlocksFor10MB = 160
)

type StreamDownloadOption struct {
	ContentMode     string
	AuthTicket      string
	BlocksPerMarker uint // Number of blocks to download per request
	VerifyDownload  bool // Verify downloaded data against ValidationRoot.
}
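
// Usage sketch (hypothetical values; DOWNLOAD_CONTENT_FULL is assumed to be
// the non-thumbnail content mode): verify each download and fetch 20 blocks
// per request.
//
//	opts := &StreamDownloadOption{
//		ContentMode:     DOWNLOAD_CONTENT_FULL,
//		BlocksPerMarker: 20,
//		VerifyDownload:  true,
//	}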

type StreamDownload struct {
	*DownloadRequest
	offset   int64 // current read position within the file
	open     bool  // false once Close has been called
	fileSize int64 // actual (plain) size of the file or thumbnail
}

// Close marks the reader as closed; subsequent Reads will fail.
func (sd *StreamDownload) Close() error {
	sd.open = false
	return nil
}

// Seek implements io.Seeker. Note that with io.SeekEnd the offset is
// interpreted as a distance back from the end of the file.
func (sd *StreamDownload) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	case io.SeekStart:
		if offset > sd.fileSize {
			return 0, errors.New(ExceededMaxOffsetValue, "")
		}
		sd.offset = offset
	case io.SeekCurrent:
		if sd.offset+offset > sd.fileSize {
			return 0, errors.New(ExceededMaxOffsetValue, "")
		}
		sd.offset += offset
	case io.SeekEnd:
		newOffset := sd.fileSize - offset
		if newOffset < 0 {
			return 0, errors.New(NegativeOffsetResultantValue, "")
		}
		sd.offset = newOffset
	default:
		return 0, errors.New(InvalidWhenceValue,
			fmt.Sprintf("expected 0, 1 or 2, provided %d", whence))
	}
	return sd.offset, nil
}
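
// Usage sketch (hypothetical caller): skip the first MiB, then position the
// reader 4096 bytes before EOF. Remember that this Seek treats the io.SeekEnd
// offset as a distance back from the end of the file.
//
//	if _, err := r.Seek(1<<20, io.SeekStart); err != nil { /* handle */ }
//	if _, err := r.Seek(4096, io.SeekEnd); err != nil { /* handle */ }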

// getStartAndEndIndex returns the start and end block indices for a read of
// wantsize bytes at the current offset, given the file size.
func (sd *StreamDownload) getStartAndEndIndex(wantsize int64) (int64, int64) {
	sizePerBlobber := (sd.fileSize +
		int64(sd.datashards) - 1) / int64(sd.datashards) // equivalent to ceil(fileSize/datashards)

	totalBlocksPerBlobber := (sizePerBlobber +
		int64(sd.effectiveBlockSize) - 1) / int64(sd.effectiveBlockSize)

	effectiveChunkSize := sd.effectiveBlockSize * sd.datashards
	startInd := sd.offset / int64(effectiveChunkSize)
	endInd := (sd.offset + wantsize + int64(effectiveChunkSize) - 1) / int64(effectiveChunkSize)
	if endInd > totalBlocksPerBlobber {
		endInd = totalBlocksPerBlobber
	}
	return startInd, endInd
}
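
// Worked example (illustrative numbers): with 4 data shards, a 1 MiB file and
// 64KB plain blocks, sizePerBlobber = ceil(1MiB/4) = 256KiB and
// totalBlocksPerBlobber = ceil(256KiB/64KiB) = 4. For a 128KiB read at
// offset 0, effectiveChunkSize = 64KiB*4 = 256KiB, so startInd = 0 and
// endInd = ceil(128KiB/256KiB) = 1.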

// Read implements io.Reader. It downloads up to len(b) bytes starting at the
// current offset and advances the offset by the number of bytes copied.
func (sd *StreamDownload) Read(b []byte) (int, error) {
	if !sd.open {
		return 0, errors.New("file_closed", "")
	}

	if sd.offset >= sd.fileSize {
		return 0, io.EOF
	}

	// Read at most len(b) bytes, and never past the end of the file.
	wantSize := sd.fileSize - sd.offset
	if int64(len(b)) < wantSize {
		wantSize = int64(len(b))
	}
	if wantSize <= 0 {
		return 0, ErrInvalidRead
	}

	startInd, endInd := sd.getStartAndEndIndex(wantSize)
	var numBlocks int64
	if sd.numBlocks > 0 {
		numBlocks = sd.numBlocks
	} else {
		numBlocks = endInd - startInd
		if numBlocks > BlocksFor10MB {
			numBlocks = BlocksFor10MB
		}
	}

	wantBlocksPerShard := (wantSize + int64(sd.effectiveBlockSize) - 1) / int64(sd.effectiveBlockSize)
	sd.blocksPerShard = wantBlocksPerShard

	// effectiveChunkSize := sd.effectiveBlockSize * sd.datashards
	n := 0
	for startInd < endInd {
		if startInd+numBlocks > endInd {
			// numBlocks must not extend past the required data, otherwise
			// erasure reconstruction fails with a `no shard data` error.
			numBlocks = endInd - startInd
		}

		data, err := sd.getBlocksData(startInd, numBlocks, true)
		if err != nil {
			return 0, err
		}

		// offset := sd.offset % int64(effectiveChunkSize)
		// The buffer b can be larger than wantSize, but no more than wantSize
		// bytes must be copied. The intra-chunk offset also matters: if the
		// current offset falls inside a chunk rather than on a chunk boundary,
		// copying the returned data from its beginning would corrupt the
		// output; the copy has to start at the intra-chunk offset instead.
		n += copy(b[n:wantSize], data[0][0])

		startInd += numBlocks
	}
	sd.offset += int64(n)
	return n, nil
}
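
// Usage sketch (hypothetical caller): drain the reader with a fixed-size
// buffer; io.ReadAll or io.Copy would work the same way, since StreamDownload
// satisfies io.Reader.
//
//	buf := make([]byte, 64*1024)
//	for {
//		n, err := r.Read(buf)
//		out = append(out, buf[:n]...) // out is a caller-side []byte
//		if err == io.EOF {
//			break
//		}
//		if err != nil {
//			return err
//		}
//	}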

// GetDStorageFileReader initializes the erasure decoder, the decrypter if the
// file is encrypted, and other necessary fields, and returns a reader that
// complies with the io.ReadSeekCloser interface.
func GetDStorageFileReader(alloc *Allocation, ref *ORef, sdo *StreamDownloadOption) (io.ReadSeekCloser, error) {

	sd := &StreamDownload{
		DownloadRequest: &DownloadRequest{
			allocationID:      alloc.ID,
			allocationTx:      alloc.Tx,
			allocOwnerID:      alloc.Owner,
			allocOwnerPubKey:  alloc.OwnerPublicKey,
			datashards:        alloc.DataShards,
			parityshards:      alloc.ParityShards,
			remotefilepath:    ref.Path,
			numBlocks:         int64(sdo.BlocksPerMarker),
			validationRootMap: make(map[string]*blobberFile),
			shouldVerify:      sdo.VerifyDownload,
			Consensus: Consensus{
				RWMutex:         &sync.RWMutex{},
				fullconsensus:   alloc.fullconsensus,
				consensusThresh: alloc.consensusThreshold,
			},
			blobbers:           alloc.Blobbers,
			downloadMask:       zboxutil.NewUint128(1).Lsh(uint64(len(alloc.Blobbers))).Sub64(1),
			effectiveBlockSize: BlockSize,
			chunkSize:          BlockSize,
			maskMu:             &sync.Mutex{},
			connectionID:       zboxutil.NewConnectionId(),
		},
		open: true,
	}

	if sdo.ContentMode == DOWNLOAD_CONTENT_THUMB {
		sd.fileSize = ref.ActualThumbnailSize
	} else {
		sd.fileSize = ref.ActualFileSize
	}

	if sdo.AuthTicket != "" {
		sEnc, err := base64.StdEncoding.DecodeString(sdo.AuthTicket)
		if err != nil {
			return nil, errors.New("auth_ticket_decode_error", "Error decoding the auth ticket. "+err.Error())
		}
		at := &marker.AuthTicket{}
		err = json.Unmarshal(sEnc, at)
		if err != nil {
			return nil, errors.New("auth_ticket_decode_error", "Error unmarshaling the auth ticket. "+err.Error())
		}

		sd.authTicket = at
	}

	sd.ctx, sd.ctxCncl = context.WithCancel(alloc.ctx)

	err := sd.initEC()
	if err != nil {
		return nil, err
	}

	if ref.EncryptedKey != "" {
		// Each encrypted block carries EncryptionOverHead bytes of encryption
		// metadata, so less plaintext fits per block.
		sd.effectiveBlockSize = BlockSize - EncryptionOverHead
		sd.encryptedKey = ref.EncryptedKey
		err = sd.initEncryption()
		if err != nil {
			return nil, err
		}
	}

	return sd, nil
}
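
// End-to-end usage sketch (hypothetical caller code: alloc, ref and dst are
// assumed to be prepared by the caller; this is not SDK-provided code):
//
//	reader, err := GetDStorageFileReader(alloc, ref, &StreamDownloadOption{
//		ContentMode:    DOWNLOAD_CONTENT_FULL,
//		VerifyDownload: true,
//	})
//	if err != nil {
//		return err
//	}
//	defer reader.Close()
//	if _, err := io.Copy(dst, reader); err != nil {
//		return err
//	}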