github.com/mutagen-io/mutagen@v0.18.0-rc1/pkg/synchronization/endpoint/remote/client.go

package remote

import (
    "bufio"
    "context"
    "errors"
    "fmt"
    "io"

    "google.golang.org/protobuf/proto"

    "github.com/mutagen-io/mutagen/pkg/encoding"
    "github.com/mutagen-io/mutagen/pkg/logging"
    streampkg "github.com/mutagen-io/mutagen/pkg/stream"
    "github.com/mutagen-io/mutagen/pkg/synchronization"
    "github.com/mutagen-io/mutagen/pkg/synchronization/compression"
    "github.com/mutagen-io/mutagen/pkg/synchronization/core"
    "github.com/mutagen-io/mutagen/pkg/synchronization/rsync"
)

// endpointClient provides an implementation of synchronization.Endpoint by
// acting as a proxy for a remotely hosted synchronization.Endpoint.
type endpointClient struct {
    // logger is the underlying logger.
    logger *logging.Logger
    // closer closes the compression resources and the control stream.
    closer io.Closer
    // flusher flushes the outbound control stream.
    flusher streampkg.Flusher
    // encoder is the control stream encoder.
    encoder *encoding.ProtobufEncoder
    // decoder is the control stream decoder.
    decoder *encoding.ProtobufDecoder
    // lastSnapshotBytes is the serialized form of the last snapshot received
    // from the remote endpoint.
    lastSnapshotBytes []byte
}
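
// Control stream message flow: every top-level request sent by this client is
// an EndpointRequest wrapping exactly one of the Poll, Scan, Stage, Supply, or
// Transition request types. Poll, Scan, Stage, and Transition are each
// answered by a corresponding response message, while Supply transitions
// directly into an rsync operation stream. The completion requests used by
// Poll, Scan, and Transition are sent as standalone messages once the
// corresponding operation is in flight.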

// NewEndpoint creates a new remote synchronization.Endpoint operating over the
// specified stream with the specified metadata. If this function fails, then
// the provided stream will be closed. Once the endpoint has been established,
// the underlying stream is owned by the endpoint and will be closed when the
// endpoint is shut down. The provided stream must unblock read and write
// operations when closed.
func NewEndpoint(
    logger *logging.Logger,
    stream io.ReadWriteCloser,
    root string,
    session string,
    version synchronization.Version,
    configuration *synchronization.Configuration,
    alpha bool,
) (synchronization.Endpoint, error) {
    // Compute the effective compression algorithm.
    compressionAlgorithm := configuration.CompressionAlgorithm
    if compressionAlgorithm.IsDefault() {
        compressionAlgorithm = version.DefaultCompressionAlgorithm()
    }

    // Perform the compression handshake.
    if err := compression.ClientHandshake(stream, compressionAlgorithm); err != nil {
        stream.Close()
        return nil, fmt.Errorf("compression handshake failed: %w", err)
    }

    // Set up inbound buffering and decompression. While the decompressor does
    // have some internal buffering, we need the inbound stream to support
    // io.ByteReader for our Protocol Buffer decoding, so we add a bufio.Reader
    // around it with additional buffering.
    compressedInbound := bufio.NewReaderSize(stream, controlStreamCompressedBufferSize)
    decompressor := compressionAlgorithm.Decompress(compressedInbound)
    inbound := bufio.NewReaderSize(decompressor, controlStreamUncompressedBufferSize)

    // Set up outbound buffering and compression.
    compressedOutbound := bufio.NewWriterSize(stream, controlStreamCompressedBufferSize)
    compressor := compressionAlgorithm.Compress(compressedOutbound)
    outbound := bufio.NewWriterSize(compressor, controlStreamUncompressedBufferSize)

    // Create a mechanism to flush the outbound pipeline.
    flusher := streampkg.NewMultiFlusher(outbound, compressor, compressedOutbound)

    // Create a closer for the control stream and compression resources.
    closer := streampkg.NewMultiCloser(
        streampkg.NewFlushCloser(outbound),
        compressor,
        streampkg.NewFlushCloser(compressedOutbound),
        stream,
        decompressor,
    )
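
    // NOTE: The ordering above matters: flushing and closing proceed from the
    // outermost uncompressed writer, through the compressor, to the buffered
    // writer that sits directly on the stream, so that any buffered data is
    // pushed all the way onto the wire before the stream itself is closed.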

    // Set up deferred closure of the control stream and compression resources
    // in the event that initialization fails.
    var successful bool
    defer func() {
        if !successful {
            closer.Close()
        }
    }()

    // Create an encoder and a decoder for Protocol Buffers messages.
    encoder := encoding.NewProtobufEncoder(outbound)
    decoder := encoding.NewProtobufDecoder(inbound)

    // Create and send the initialize request.
    request := &InitializeSynchronizationRequest{
        Root:          root,
        Session:       session,
        Version:       version,
        Configuration: configuration,
        Alpha:         alpha,
    }
    if err := encoder.Encode(request); err != nil {
        return nil, fmt.Errorf("unable to encode initialize request: %w", err)
    } else if err = flusher.Flush(); err != nil {
        return nil, fmt.Errorf("unable to transmit initialize request: %w", err)
    }

    // Receive the response and check for remote errors.
    response := &InitializeSynchronizationResponse{}
    if err := decoder.Decode(response); err != nil {
        return nil, fmt.Errorf("unable to receive initialize response: %w", err)
    } else if err = response.ensureValid(); err != nil {
        return nil, fmt.Errorf("invalid initialize response: %w", err)
    } else if response.Error != "" {
        return nil, fmt.Errorf("remote error: %s", response.Error)
    }

    // Success.
    successful = true
    return &endpointClient{
        logger:  logger,
        closer:  closer,
        flusher: flusher,
        encoder: encoder,
        decoder: decoder,
    }, nil
}
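
// A minimal usage sketch, assuming conn, sessionID, version, and configuration
// are obtained from the surrounding session infrastructure (the names here are
// illustrative, not part of this package):
//
//    endpoint, err := remote.NewEndpoint(
//        logger, conn, "/path/to/root", sessionID, version, configuration, true,
//    )
//    if err != nil {
//        // The stream has already been closed by NewEndpoint.
//        return err
//    }
//    defer endpoint.Shutdown()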

// encodeAndFlush encodes a Protocol Buffers message using the underlying
// encoder and then flushes the control stream.
func (c *endpointClient) encodeAndFlush(message proto.Message) error {
    if err := c.encoder.Encode(message); err != nil {
        return err
    } else if err = c.flusher.Flush(); err != nil {
        return fmt.Errorf("message transmission failed: %w", err)
    }
    return nil
}
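
// NOTE: The explicit flush matters because the encoder writes into a buffered
// writer that feeds the compressor, which in turn feeds another buffered
// writer wrapping the stream; without flushing all three layers, a small
// request could sit in those buffers indefinitely and never reach the remote.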

// Poll implements the Poll method for remote endpoints.
func (c *endpointClient) Poll(ctx context.Context) error {
    // Create and send the poll request.
    request := &EndpointRequest{Poll: &PollRequest{}}
    if err := c.encodeAndFlush(request); err != nil {
        return fmt.Errorf("unable to send poll request: %w", err)
    }

    // Create a subcontext that we can cancel to regulate transmission of the
    // completion request.
    completionCtx, cancel := context.WithCancel(ctx)

    // Create a Goroutine that will send a poll completion request when the
    // subcontext is cancelled.
    completionSendErrors := make(chan error, 1)
    go func() {
        <-completionCtx.Done()
        if err := c.encodeAndFlush(&PollCompletionRequest{}); err != nil {
            completionSendErrors <- fmt.Errorf("unable to send completion request: %w", err)
        } else {
            completionSendErrors <- nil
        }
    }()

    // Create a Goroutine that will receive a poll response.
    response := &PollResponse{}
    responseReceiveErrors := make(chan error, 1)
    go func() {
        if err := c.decoder.Decode(response); err != nil {
            responseReceiveErrors <- fmt.Errorf("unable to receive poll response: %w", err)
        } else if err = response.ensureValid(); err != nil {
            responseReceiveErrors <- fmt.Errorf("invalid poll response: %w", err)
        } else {
            responseReceiveErrors <- nil
        }
    }()

    // Wait for both a completion request to be sent and a response to be
    // received. Both of these will occur, though their order is not known. If
    // the completion request is sent first, then we know that the polling
    // context has been cancelled and that a response is on its way. In this
    // case, we still cancel the subcontext we created as required by the
    // context package to avoid leaking resources. If the response comes first,
    // then we need to force sending of the completion request and wait for the
    // result of that operation.
    var completionSendErr, responseReceiveErr error
    select {
    case completionSendErr = <-completionSendErrors:
        cancel()
        responseReceiveErr = <-responseReceiveErrors
    case responseReceiveErr = <-responseReceiveErrors:
        cancel()
        completionSendErr = <-completionSendErrors
    }

    // Check for transmission errors.
    if responseReceiveErr != nil {
        return responseReceiveErr
    } else if completionSendErr != nil {
        return completionSendErr
    }

    // Check for remote errors.
    if response.Error != "" {
        return fmt.Errorf("remote error: %s", response.Error)
    }

    // Done.
    return nil
}
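
// NOTE: As reflected in the logic above, each poll request is paired with
// exactly one completion request and exactly one response. That is why cancel
// is invoked on both select branches: even when the response arrives before
// the polling context is cancelled, the completion request is still forced out
// so that the two sides stay in lockstep.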

// Scan implements the Scan method for remote endpoints.
func (c *endpointClient) Scan(ctx context.Context, ancestor *core.Entry, full bool) (*core.Snapshot, error, bool) {
    // Create an rsync engine.
    engine := rsync.NewEngine()

    // Compute the bytes that we'll use as the base for receiving the snapshot.
    // If we have the bytes from the last received snapshot, then use those,
    // because they'll be more accurate, but otherwise use the provided
    // ancestor (with some probabilistic assumptions about filesystem behavior).
    var baselineBytes []byte
    if c.lastSnapshotBytes != nil {
        c.logger.Debug("Using last snapshot bytes as baseline")
        baselineBytes = c.lastSnapshotBytes
    } else {
        c.logger.Debug("Using ancestor-based snapshot as baseline")
        var err error
        marshaling := proto.MarshalOptions{Deterministic: true}
        baselineBytes, err = marshaling.Marshal(&core.Snapshot{
            Content:                ancestor,
            PreservesExecutability: true,
        })
        if err != nil {
            return nil, fmt.Errorf("unable to marshal ancestor-based snapshot: %w", err), false
        }
    }

    // Compute the base signature.
    baselineSignature := engine.BytesSignature(baselineBytes, 0)
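
    // NOTE: The snapshot itself is transferred using rsync: the client sends a
    // signature of its serialized baseline snapshot, the remote computes a
    // delta of its own serialized snapshot against that signature, and the
    // client patches the baseline with that delta below. Since successive
    // snapshots tend to be very similar, this usually keeps the transferred
    // delta far smaller than the full serialized snapshot.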

    // Create and send the scan request.
    request := &EndpointRequest{
        Scan: &ScanRequest{
            BaselineSnapshotSignature: baselineSignature,
            Full:                      full,
        },
    }
    if err := c.encodeAndFlush(request); err != nil {
        return nil, fmt.Errorf("unable to send scan request: %w", err), false
    }

    // Create a subcontext that we can cancel to regulate transmission of the
    // completion request.
    completionCtx, cancel := context.WithCancel(ctx)

    // Create a Goroutine that will send a scan completion request when the
    // subcontext is cancelled.
    completionSendErrors := make(chan error, 1)
    go func() {
        <-completionCtx.Done()
        if err := c.encodeAndFlush(&ScanCompletionRequest{}); err != nil {
            completionSendErrors <- fmt.Errorf("unable to send completion request: %w", err)
        } else {
            completionSendErrors <- nil
        }
    }()

    // Create a Goroutine that will receive a scan response.
    response := &ScanResponse{}
    responseReceiveErrors := make(chan error, 1)
    go func() {
        if err := c.decoder.Decode(response); err != nil {
            responseReceiveErrors <- fmt.Errorf("unable to receive scan response: %w", err)
        } else if err = response.ensureValid(); err != nil {
            responseReceiveErrors <- fmt.Errorf("invalid scan response: %w", err)
        } else {
            responseReceiveErrors <- nil
        }
    }()

    // Wait for both a completion request to be sent and a response to be
    // received. Both of these will occur, though their order is not known. If
    // the completion request is sent first, then we know that the scanning
    // context has been cancelled and that a response is on its way. In this
    // case, we still cancel the subcontext we created as required by the
    // context package to avoid leaking resources. If the response comes first,
    // then we need to force sending of the completion request and wait for the
    // result of that operation.
    var completionSendErr, responseReceiveErr error
    select {
    case completionSendErr = <-completionSendErrors:
        cancel()
        responseReceiveErr = <-responseReceiveErrors
    case responseReceiveErr = <-responseReceiveErrors:
        cancel()
        completionSendErr = <-completionSendErrors
    }

    // Check for transmission errors.
    if responseReceiveErr != nil {
        return nil, responseReceiveErr, false
    } else if completionSendErr != nil {
        return nil, completionSendErr, false
    }

    // Check for remote errors.
    if response.Error != "" {
        return nil, fmt.Errorf("remote error: %s", response.Error), response.TryAgain
    }

    // Apply the remote's delta operations to the baseline snapshot bytes.
    snapshotBytes, err := engine.PatchBytes(baselineBytes, baselineSignature, response.SnapshotDelta)
    if err != nil {
        return nil, fmt.Errorf("unable to patch base snapshot: %w", err), false
    }
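
    // A rough sketch of the round trip (assuming the rsync package's
    // byte-oriented helpers; DeltafyBytes and remoteSnapshotBytes appear here
    // only to illustrate what the remote side is presumed to do):
    //
    //    engine := rsync.NewEngine()
    //    signature := engine.BytesSignature(baselineBytes, 0)                 // client side
    //    delta := engine.DeltafyBytes(remoteSnapshotBytes, signature, 0)      // remote side
    //    patched, err := engine.PatchBytes(baselineBytes, signature, delta)   // client side
    //    // On success, patched matches remoteSnapshotBytes.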

    // If logging is enabled, then compute snapshot transmission statistics.
    if c.logger.Level() >= logging.LevelDebug {
        var dataOperations, totalDataSize, blockOperations int
        for _, operation := range response.SnapshotDelta {
            if dataSize := len(operation.Data); dataSize > 0 {
                dataOperations++
                totalDataSize += dataSize
            } else {
                blockOperations++
            }
        }
        c.logger.Debugf("Snapshot delta yielded %d bytes using %d block operation(s) and %d data operation(s) totaling %d byte(s)",
            len(snapshotBytes), blockOperations, dataOperations, totalDataSize,
        )
    }

    // Unmarshal the snapshot.
    snapshot := &core.Snapshot{}
    if err := proto.Unmarshal(snapshotBytes, snapshot); err != nil {
        return nil, fmt.Errorf("unable to unmarshal snapshot: %w", err), false
    }
    // Ensure that the snapshot is valid since it came over the network.
    // Ideally this validation would be performed by ScanResponse's ensureValid
    // method, but because the snapshot only becomes available after rsync-based
    // patching and Protocol Buffers decoding, it can't be validated there.
    if err = snapshot.EnsureValid(); err != nil {
        return nil, fmt.Errorf("invalid snapshot received: %w", err), false
    }

    // Store the bytes that gave us a successful snapshot so that we can use
    // them as a baseline for receiving the next snapshot, but only do this if
    // the snapshot content was non-nil (i.e. there were entries on disk). If we
    // received a snapshot with no entries, then chances are that it's coming
    // from a remote endpoint that hasn't yet been populated with content,
    // meaning that its next transmission (after being populated) will be far
    // closer to the ancestor than to the empty snapshot it just sent, so we
    // want to keep using the serialized ancestor snapshot as the baseline until
    // we receive a populated snapshot.
    if snapshot.Content != nil {
        c.lastSnapshotBytes = snapshotBytes
    }

    // Success.
    return snapshot, nil, false
}

// Stage implements the Stage method for remote endpoints.
func (c *endpointClient) Stage(paths []string, digests [][]byte) ([]string, []*rsync.Signature, rsync.Receiver, error) {
    // Validate argument lengths and bail if there's nothing to stage.
    if len(paths) != len(digests) {
        return nil, nil, nil, errors.New("path count does not match digest count")
    } else if len(paths) == 0 {
        return nil, nil, nil, nil
    }

    // Create and send the stage request.
    request := &EndpointRequest{
        Stage: &StageRequest{
            Paths:   paths,
            Digests: digests,
        },
    }
    if err := c.encodeAndFlush(request); err != nil {
        return nil, nil, nil, fmt.Errorf("unable to send stage request: %w", err)
    }

    // Receive the response and check for remote errors.
    response := &StageResponse{}
    if err := c.decoder.Decode(response); err != nil {
        return nil, nil, nil, fmt.Errorf("unable to receive stage response: %w", err)
    } else if err = response.ensureValid(paths); err != nil {
        return nil, nil, nil, fmt.Errorf("invalid stage response: %w", err)
    } else if response.Error != "" {
        return nil, nil, nil, fmt.Errorf("remote error: %s", response.Error)
    }

    // Handle the shorthand mechanism used by the remote to indicate that all
    // paths are required.
    requiredPaths := response.Paths
    if len(response.Paths) == 0 && len(response.Signatures) > 0 {
        requiredPaths = paths
    }

    // If everything was already staged, then we can abort the staging
    // operation.
    if len(requiredPaths) == 0 {
        return nil, nil, nil, nil
    }

    // Create an encoding receiver that can transmit rsync operations to the
    // remote.
    encoder := &protobufRsyncEncoder{encoder: c.encoder, flusher: c.flusher}
    receiver := rsync.NewEncodingReceiver(encoder)

    // Success.
    return requiredPaths, response.Signatures, receiver, nil
}
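
// NOTE: Stage and Supply are two halves of the same transfer: typically the
// session controller passes the paths, signatures, and receiver returned here
// to the opposite endpoint's Supply method, and the operations that Supply
// generates are then encoded by the receiver created above and transmitted to
// this remote endpoint for staging.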

// Supply implements the Supply method for remote endpoints.
func (c *endpointClient) Supply(paths []string, signatures []*rsync.Signature, receiver rsync.Receiver) error {
    // Create and send the supply request.
    request := &EndpointRequest{
        Supply: &SupplyRequest{
            Paths:      paths,
            Signatures: signatures,
        },
    }
    if err := c.encodeAndFlush(request); err != nil {
        // TODO: Should we find a way to finalize the receiver here? That's a
        // private rsync method, and there shouldn't be any resources in the
        // receiver in need of finalizing here, but it would be worth thinking
        // about for consistency.
        return fmt.Errorf("unable to send supply request: %w", err)
    }

    // We don't receive a response to ensure that the remote is ready to
    // transmit, because there aren't really any errors that we can detect
    // before transmission starts and there's no way to transmit them once
    // transmission starts. If DecodeToReceiver succeeds, we can assume that the
    // forwarding succeeded, and if it fails, there's really no way for us to
    // get error information from the remote.

    // The endpoint should now forward rsync operations, so we need to decode
    // and forward them to the receiver. If this operation completes
    // successfully, supplying is complete and successful.
    decoder := &protobufRsyncDecoder{decoder: c.decoder}
    if err := rsync.DecodeToReceiver(decoder, uint64(len(paths)), receiver); err != nil {
        return fmt.Errorf("unable to decode and forward rsync operations: %w", err)
    }

    // Success.
    return nil
}
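
// NOTE: In this direction the remote endpoint is the one generating rsync
// operations: DecodeToReceiver decodes operations for len(paths) files from
// the control stream and forwards them to the provided receiver, so an error
// surfaced here may reflect either a decoding failure or a receiver failure.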

// Transition implements the Transition method for remote endpoints.
func (c *endpointClient) Transition(ctx context.Context, transitions []*core.Change) ([]*core.Entry, []*core.Problem, bool, error) {
    // Create and send the transition request.
    request := &EndpointRequest{
        Transition: &TransitionRequest{
            Transitions: transitions,
        },
    }
    if err := c.encodeAndFlush(request); err != nil {
        return nil, nil, false, fmt.Errorf("unable to send transition request: %w", err)
    }

    // Create a subcontext that we can cancel to regulate transmission of the
    // completion request.
    completionCtx, cancel := context.WithCancel(ctx)

    // Create a Goroutine that will send a transition completion request when
    // the subcontext is cancelled.
    completionSendErrors := make(chan error, 1)
    go func() {
        <-completionCtx.Done()
        if err := c.encodeAndFlush(&TransitionCompletionRequest{}); err != nil {
            completionSendErrors <- fmt.Errorf("unable to send completion request: %w", err)
        } else {
            completionSendErrors <- nil
        }
    }()

    // Create a Goroutine that will receive a transition response.
    response := &TransitionResponse{}
    responseReceiveErrors := make(chan error, 1)
    go func() {
        if err := c.decoder.Decode(response); err != nil {
            responseReceiveErrors <- fmt.Errorf("unable to receive transition response: %w", err)
        } else if err = response.ensureValid(len(transitions)); err != nil {
            responseReceiveErrors <- fmt.Errorf("invalid transition response: %w", err)
        } else {
            responseReceiveErrors <- nil
        }
    }()

    // Wait for both a completion request to be sent and a response to be
    // received. Both of these will occur, though their order is not known. If
    // the completion request is sent first, then we know that the transition
    // context has been cancelled and that a response is on its way. In this
    // case, we still cancel the subcontext we created as required by the
    // context package to avoid leaking resources. If the response comes first,
    // then we need to force sending of the completion request and wait for the
    // result of that operation.
    var completionSendErr, responseReceiveErr error
    select {
    case completionSendErr = <-completionSendErrors:
        cancel()
        responseReceiveErr = <-responseReceiveErrors
    case responseReceiveErr = <-responseReceiveErrors:
        cancel()
        completionSendErr = <-completionSendErrors
    }

    // Check for transmission errors.
    if responseReceiveErr != nil {
        return nil, nil, false, responseReceiveErr
    } else if completionSendErr != nil {
        return nil, nil, false, completionSendErr
    }

    // Check for remote errors.
    if response.Error != "" {
        return nil, nil, false, fmt.Errorf("remote error: %s", response.Error)
    }

    // HACK: Extract the wrapped results.
    results := make([]*core.Entry, len(response.Results))
    for r, result := range response.Results {
        results[r] = result.Content
    }

    // Success.
    return results, response.Problems, response.StagerMissingFiles, nil
}

// Shutdown implements the Shutdown method for remote endpoints.
func (c *endpointClient) Shutdown() error {
    // Close the compression resources and the control stream. This will cause
    // all control stream reads/writes to unblock.
    return c.closer.Close()
}
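
// NOTE: Shutdown relies on the contract stated in NewEndpoint's documentation:
// the underlying stream must unblock read and write operations when closed,
// which is what allows any in-flight Poll, Scan, or Transition call to return
// once the closer tears down the control stream.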