storj.io/uplink@v1.13.0/private/piecestore/client.go (about)

     1  // Copyright (C) 2019 Storj Labs, Inc.
     2  // See LICENSE for copying information.
     3  
     4  package piecestore
     5  
     6  import (
     7  	"bytes"
     8  	"context"
     9  	"errors"
    10  	"io"
    11  	"os"
    12  	"time"
    13  
    14  	"github.com/zeebo/errs"
    15  
    16  	"storj.io/common/identity"
    17  	"storj.io/common/memory"
    18  	"storj.io/common/pb"
    19  	"storj.io/common/rpc"
    20  	"storj.io/common/storj"
    21  )
    22  
// NoiseEnabled indicates whether Noise is enabled in this build.
// It defaults to true; setting STORJ_NOISE_DISABLED_EXPERIMENTAL=true
// in the environment turns it off.
var NoiseEnabled = os.Getenv("STORJ_NOISE_DISABLED_EXPERIMENTAL") != "true"

// errMessageTimeout signals that a stream message did not complete within
// the configured Config.MessageTimeout.
// NOTE(review): not referenced in this file; presumably used by the
// upload/download code elsewhere in the package — confirm.
var errMessageTimeout = errors.New("message timeout")

const (
	// retainMessageLimit defines the max size which can be sent via normal request/response protocol.
	// 4MB is the absolute max, defined by DRPC, but we need a few bytes for protobuf overhead / filter header.
	retainMessageLimit = 4100050
)

var (
	// Error is the default error class for piecestore client.
	Error = errs.Class("piecestore")

	// CloseError is the error class used for errors generated during a
	// stream close in a piecestore client.
	//
	// Errors of this type should also be wrapped with Error, for backwards
	// compatibility.
	CloseError = errs.Class("piecestore close")
)
    45  
// Config defines piecestore client parameters for upload and download.
type Config struct {
	// DownloadBufferSize and UploadBufferSize are buffer sizes in bytes
	// (presumably in-memory transfer buffers — confirm against the
	// upload/download code that consumes them).
	DownloadBufferSize int64
	UploadBufferSize   int64

	// InitialStep is the first order allocation step size; subsequent steps
	// grow by 3/2 (see nextOrderStep) until capped at MaximumStep.
	InitialStep      int64
	MaximumStep      int64
	// MaximumChunkSize limits the size of a single transfer chunk.
	MaximumChunkSize int32

	// MessageTimeout bounds how long a single stream message may take
	// (see errMessageTimeout).
	MessageTimeout time.Duration
}
    57  
// DefaultConfig are the default params used for upload and download.
var DefaultConfig = Config{
	DownloadBufferSize: 256 * memory.KiB.Int64(),
	UploadBufferSize:   64 * memory.KiB.Int64(),

	InitialStep:      256 * memory.KiB.Int64(),
	MaximumStep:      550 * memory.KiB.Int64(),
	MaximumChunkSize: 16 * memory.KiB.Int32(),

	MessageTimeout: 10 * time.Minute,
}
    69  
// Client implements uploading, downloading and deleting content from a piecestore.
type Client struct {
	client         pb.DRPCPiecestoreClient           // populated by Dial; regular piecestore RPCs
	replaySafe     pb.DRPCReplaySafePiecestoreClient // populated by DialReplaySafe; replay-safe RPCs only
	nodeURL        storj.NodeURL                     // the node this client dialed
	conn           *rpc.Conn                         // underlying connection, closed by Close
	config         Config
	UploadHashAlgo pb.PieceHashAlgorithm // hash algorithm to use for uploads; zero value is the protocol default
}
    79  
    80  // Dial dials the target piecestore endpoint.
    81  func Dial(ctx context.Context, dialer rpc.Dialer, nodeURL storj.NodeURL, config Config) (*Client, error) {
    82  	conn, err := dialer.DialNodeURL(ctx, nodeURL)
    83  	if err != nil {
    84  		return nil, Error.Wrap(err)
    85  	}
    86  
    87  	return &Client{
    88  		client:  pb.NewDRPCPiecestoreClient(conn),
    89  		nodeURL: nodeURL,
    90  		conn:    conn,
    91  		config:  config,
    92  	}, nil
    93  }
    94  
    95  // DialReplaySafe dials the target piecestore endpoint for replay safe request types.
    96  func DialReplaySafe(ctx context.Context, dialer rpc.Dialer, nodeURL storj.NodeURL, config Config) (*Client, error) {
    97  	conn, err := dialer.DialNode(ctx, nodeURL, rpc.DialOptions{ReplaySafe: NoiseEnabled})
    98  	if err != nil {
    99  		return nil, Error.Wrap(err)
   100  	}
   101  
   102  	return &Client{
   103  		replaySafe: pb.NewDRPCReplaySafePiecestoreClient(conn),
   104  		nodeURL:    nodeURL,
   105  		conn:       conn,
   106  		config:     config,
   107  	}, nil
   108  
   109  }
   110  
// Retain uses a bloom filter to tell the piece store which pieces to keep.
//
// Filters that fit into a single DRPC message (<= retainMessageLimit bytes)
// are sent via the legacy unary Retain RPC. Larger filters are split into
// retainMessageLimit-sized chunks and streamed via RetainBig; the final chunk
// additionally carries a BLAKE3 hash of the complete filter so the server can
// detect splitting/reassembly errors.
func (client *Client) Retain(ctx context.Context, req *pb.RetainRequest) (err error) {
	defer mon.Task()(&ctx)(&err)

	// TODO: eventually we will switch to the new protocol by default (because it has hash)
	// but it requires more changes in storj.io/storj/storagenode first
	// until that, we can use the old protocol, by default.
	if len(req.Filter) <= retainMessageLimit {
		_, err = client.client.Retain(ctx, req)
		return err
	}

	// it's a big message, we will use the RetainBig, stream-based protocol

	// hash is calculated on the full message, splitting error can also be detected.
	// NOTE(review): the Write error is discarded; presumably this is a
	// stdlib-style hash.Hash whose Write never returns an error — confirm.
	hasher := pb.NewHashFromAlgorithm(pb.PieceHashAlgorithm_BLAKE3)
	hasher.Write(req.Filter)
	hash := hasher.Sum([]byte{})

	stream, err := client.client.RetainBig(ctx)
	if err != nil {
		return Error.Wrap(err)
	}
	var lastMessage bool
	// The loop condition is deliberately `<=`: when len(req.Filter) is an
	// exact multiple of retainMessageLimit, one extra iteration runs with
	// i == len(req.Filter), sending a final empty chunk that carries the
	// hash (lastMessage only becomes true when endOffset passes the end).
	for i := 0; i <= len(req.Filter); i += retainMessageLimit {

		endOffset := i + retainMessageLimit

		if endOffset > len(req.Filter) {
			lastMessage = true
			endOffset = len(req.Filter)

		}
		// Each chunk repeats the creation date; only the last chunk gets
		// the hash + algorithm, which is the receiver's end-of-stream marker
		// (see RetainRequestFromStream).
		req := &pb.RetainRequest{
			CreationDate: req.CreationDate,
			Filter:       req.Filter[i:endOffset],
		}

		if lastMessage {
			req.HashAlgorithm = pb.PieceHashAlgorithm_BLAKE3
			req.Hash = hash
		}

		err = stream.Send(req)
		if err != nil {
			// this is too big, no hope to send with the legacy endpoint
			return Error.Wrap(errs.Combine(err, stream.CloseSend()))
		}
	}
	err = stream.Close()
	return Error.Wrap(err)
}
   163  
   164  // RetainRequestFromStream is the inverse logic of Client.Retain method, which splits the retain messages to smaller chunks.
   165  // strictly speaking, it's a server side code, but it's easier to maintain and test here, as it should be the
   166  // opposite of the Retain method.
   167  func RetainRequestFromStream(stream pb.DRPCPiecestore_RetainBigStream) (pb.RetainRequest, error) {
   168  	resp := pb.RetainRequest{}
   169  	for {
   170  		req, err := stream.Recv()
   171  		if err != nil {
   172  			return resp, err
   173  		}
   174  		if resp.CreationDate.IsZero() {
   175  			resp.CreationDate = req.CreationDate
   176  		}
   177  		resp.Filter = append(resp.Filter, req.Filter...)
   178  		if len(req.Hash) > 0 {
   179  			hasher := pb.NewHashFromAlgorithm(req.HashAlgorithm)
   180  			_, err := hasher.Write(resp.Filter)
   181  			if err != nil {
   182  				return resp, errs.Wrap(err)
   183  			}
   184  			if !bytes.Equal(req.Hash, hasher.Sum(nil)) {
   185  				return resp, errs.New("Hash mismatch")
   186  			}
   187  			break
   188  		}
   189  	}
   190  	return resp, stream.Close()
   191  }
   192  
// Close closes the underlying connection.
// Note: the returned error is not wrapped in Error/CloseError.
func (client *Client) Close() error {
	return client.conn.Close()
}
   197  
// GetPeerIdentity gets the connection's peer identity. This doesn't work
// on Noise-based connections (i.e. those created via DialReplaySafe when
// Noise is enabled).
func (client *Client) GetPeerIdentity() (*identity.PeerIdentity, error) {
	return client.conn.PeerIdentity()
}
   203  
// NodeURL returns the Node we dialed.
func (client *Client) NodeURL() storj.NodeURL { return client.nodeURL }
   206  
   207  // next allocation step find the next trusted step.
   208  func (client *Client) nextOrderStep(previous int64) int64 {
   209  	// TODO: ensure that this is frame idependent
   210  	next := previous * 3 / 2
   211  	if next > client.config.MaximumStep {
   212  		next = client.config.MaximumStep
   213  	}
   214  	return next
   215  }
   216  
   217  // ignoreEOF is an utility func for ignoring EOF error, when it's not important.
   218  func ignoreEOF(err error) error {
   219  	if errors.Is(err, io.EOF) {
   220  		return nil
   221  	}
   222  	return err
   223  }