github.com/pkg/sftp@v1.13.6/client.go (about)

     1  package sftp
     2  
     3  import (
     4  	"bytes"
     5  	"encoding/binary"
     6  	"errors"
     7  	"fmt"
     8  	"io"
     9  	"math"
    10  	"os"
    11  	"path"
    12  	"sync"
    13  	"sync/atomic"
    14  	"syscall"
    15  	"time"
    16  
    17  	"github.com/kr/fs"
    18  	"golang.org/x/crypto/ssh"
    19  )
    20  
var (
	// ErrInternalInconsistency indicates the packets sent and the data queued to be
	// written to the file don't match up. It is an unusual error and usually is
	// caused by bad behavior server side or connection issues. The error is
	// limited in scope to the call where it happened, the client object is still
	// OK to use as long as the connection is still open.
	ErrInternalInconsistency = errors.New("internal inconsistency")

	// InternalInconsistency is an alias for ErrInternalInconsistency.
	//
	// Deprecated: please use ErrInternalInconsistency
	InternalInconsistency = ErrInternalInconsistency
)
    33  
// A ClientOption is a function which applies configuration to a Client.
// Options are applied in order during client construction; an option
// returning a non-nil error aborts construction.
type ClientOption func(*Client) error
    36  
    37  // MaxPacketChecked sets the maximum size of the payload, measured in bytes.
    38  // This option only accepts sizes servers should support, ie. <= 32768 bytes.
    39  //
    40  // If you get the error "failed to send packet header: EOF" when copying a
    41  // large file, try lowering this number.
    42  //
    43  // The default packet size is 32768 bytes.
    44  func MaxPacketChecked(size int) ClientOption {
    45  	return func(c *Client) error {
    46  		if size < 1 {
    47  			return errors.New("size must be greater or equal to 1")
    48  		}
    49  		if size > 32768 {
    50  			return errors.New("sizes larger than 32KB might not work with all servers")
    51  		}
    52  		c.maxPacket = size
    53  		return nil
    54  	}
    55  }
    56  
    57  // MaxPacketUnchecked sets the maximum size of the payload, measured in bytes.
    58  // It accepts sizes larger than the 32768 bytes all servers should support.
    59  // Only use a setting higher than 32768 if your application always connects to
    60  // the same server or after sufficiently broad testing.
    61  //
    62  // If you get the error "failed to send packet header: EOF" when copying a
    63  // large file, try lowering this number.
    64  //
    65  // The default packet size is 32768 bytes.
    66  func MaxPacketUnchecked(size int) ClientOption {
    67  	return func(c *Client) error {
    68  		if size < 1 {
    69  			return errors.New("size must be greater or equal to 1")
    70  		}
    71  		c.maxPacket = size
    72  		return nil
    73  	}
    74  }
    75  
// MaxPacket sets the maximum size of the payload, measured in bytes.
// This option only accepts sizes servers should support, ie. <= 32768 bytes.
// This is a synonym for MaxPacketChecked that provides backward compatibility;
// it simply delegates to MaxPacketChecked.
//
// If you get the error "failed to send packet header: EOF" when copying a
// large file, try lowering this number.
//
// The default packet size is 32768 bytes.
func MaxPacket(size int) ClientOption {
	return MaxPacketChecked(size)
}
    87  
    88  // MaxConcurrentRequestsPerFile sets the maximum concurrent requests allowed for a single file.
    89  //
    90  // The default maximum concurrent requests is 64.
    91  func MaxConcurrentRequestsPerFile(n int) ClientOption {
    92  	return func(c *Client) error {
    93  		if n < 1 {
    94  			return errors.New("n must be greater or equal to 1")
    95  		}
    96  		c.maxConcurrentRequests = n
    97  		return nil
    98  	}
    99  }
   100  
   101  // UseConcurrentWrites allows the Client to perform concurrent Writes.
   102  //
   103  // Using concurrency while doing writes, requires special consideration.
   104  // A write to a later offset in a file after an error,
   105  // could end up with a file length longer than what was successfully written.
   106  //
   107  // When using this option, if you receive an error during `io.Copy` or `io.WriteTo`,
   108  // you may need to `Truncate` the target Writer to avoid “holes” in the data written.
   109  func UseConcurrentWrites(value bool) ClientOption {
   110  	return func(c *Client) error {
   111  		c.useConcurrentWrites = value
   112  		return nil
   113  	}
   114  }
   115  
   116  // UseConcurrentReads allows the Client to perform concurrent Reads.
   117  //
   118  // Concurrent reads are generally safe to use and not using them will degrade
   119  // performance, so this option is enabled by default.
   120  //
   121  // When enabled, WriteTo will use Stat/Fstat to get the file size and determines
   122  // how many concurrent workers to use.
   123  // Some "read once" servers will delete the file if they receive a stat call on an
   124  // open file and then the download will fail.
   125  // Disabling concurrent reads you will be able to download files from these servers.
   126  // If concurrent reads are disabled, the UseFstat option is ignored.
   127  func UseConcurrentReads(value bool) ClientOption {
   128  	return func(c *Client) error {
   129  		c.disableConcurrentReads = !value
   130  		return nil
   131  	}
   132  }
   133  
   134  // UseFstat sets whether to use Fstat or Stat when File.WriteTo is called
   135  // (usually when copying files).
   136  // Some servers limit the amount of open files and calling Stat after opening
   137  // the file will throw an error From the server. Setting this flag will call
   138  // Fstat instead of Stat which is suppose to be called on an open file handle.
   139  //
   140  // It has been found that that with IBM Sterling SFTP servers which have
   141  // "extractability" level set to 1 which means only 1 file can be opened at
   142  // any given time.
   143  //
   144  // If the server you are working with still has an issue with both Stat and
   145  // Fstat calls you can always open a file and read it until the end.
   146  //
   147  // Another reason to read the file until its end and Fstat doesn't work is
   148  // that in some servers, reading a full file will automatically delete the
   149  // file as some of these mainframes map the file to a message in a queue.
   150  // Once the file has been read it will get deleted.
   151  func UseFstat(value bool) ClientOption {
   152  	return func(c *Client) error {
   153  		c.useFstat = value
   154  		return nil
   155  	}
   156  }
   157  
// Client represents an SFTP session on a *ssh.ClientConn SSH connection.
// Multiple Clients can be active on a single SSH connection, and a Client
// may be called concurrently from multiple Goroutines.
//
// Client implements the github.com/kr/fs.FileSystem interface.
type Client struct {
	clientConn

	ext map[string]string // Extensions (name -> data), as advertised by the server in its version packet.

	maxPacket             int    // max packet size read or written.
	maxConcurrentRequests int    // per-file request concurrency limit; see MaxConcurrentRequestsPerFile.
	nextid                uint32 // last issued request ID; access only via nextID().

	// write concurrency is… error prone.
	// Default behavior should be to not use it.
	useConcurrentWrites    bool
	useFstat               bool // use Fstat instead of Stat for File.WriteTo; see UseFstat.
	disableConcurrentReads bool // set via UseConcurrentReads(false).
}
   178  
   179  // NewClient creates a new SFTP client on conn, using zero or more option
   180  // functions.
   181  func NewClient(conn *ssh.Client, opts ...ClientOption) (*Client, error) {
   182  	s, err := conn.NewSession()
   183  	if err != nil {
   184  		return nil, err
   185  	}
   186  	if err := s.RequestSubsystem("sftp"); err != nil {
   187  		return nil, err
   188  	}
   189  	pw, err := s.StdinPipe()
   190  	if err != nil {
   191  		return nil, err
   192  	}
   193  	pr, err := s.StdoutPipe()
   194  	if err != nil {
   195  		return nil, err
   196  	}
   197  
   198  	return NewClientPipe(pr, pw, opts...)
   199  }
   200  
// NewClientPipe creates a new SFTP client given a Reader and a WriteCloser.
// This can be used for connecting to an SFTP server over TCP/TLS or by using
// the system's ssh client program (e.g. via exec.Command).
//
// On any failure during construction the writer is closed and a nil Client
// is returned.
func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...ClientOption) (*Client, error) {
	sftp := &Client{
		clientConn: clientConn{
			conn: conn{
				Reader:      rd,
				WriteCloser: wr,
			},
			inflight: make(map[uint32]chan<- result),
			closed:   make(chan struct{}),
		},

		ext: make(map[string]string),

		// Defaults; both may be overridden by the options applied below.
		maxPacket:             1 << 15, // 32768 bytes, the size all servers should support.
		maxConcurrentRequests: 64,
	}

	// Apply options in order; the first failure aborts construction.
	for _, opt := range opts {
		if err := opt(sftp); err != nil {
			wr.Close()
			return nil, err
		}
	}

	// Version negotiation: send SSH_FXP_INIT, then read the server's
	// version reply (which also carries any advertised extensions).
	if err := sftp.sendInit(); err != nil {
		wr.Close()
		return nil, fmt.Errorf("error sending init packet to server: %w", err)
	}

	if err := sftp.recvVersion(); err != nil {
		wr.Close()
		return nil, fmt.Errorf("error receiving version packet from server: %w", err)
	}

	// Start the background receive loop; any receive error is broadcast to
	// pending requests so they do not block forever.
	sftp.clientConn.wg.Add(1)
	go func() {
		defer sftp.clientConn.wg.Done()

		if err := sftp.clientConn.recv(); err != nil {
			sftp.clientConn.broadcastErr(err)
		}
	}()

	return sftp, nil
}
   249  
// Create creates the named file mode 0666 (before umask), truncating it if it
// already exists. If successful, methods on the returned File can be used for
// I/O; the associated file descriptor has mode O_RDWR. If you need more
// control over the flags/mode used to open the file see client.OpenFile.
//
// Note that some SFTP servers (eg. AWS Transfer) do not support opening files
// read/write at the same time. For those services you will need to use
// `client.OpenFile(os.O_WRONLY|os.O_CREATE|os.O_TRUNC)`.
func (c *Client) Create(path string) (*File, error) {
	// Delegates to open with os.Open-style flags translated to SFTP pflags.
	return c.open(path, flags(os.O_RDWR|os.O_CREATE|os.O_TRUNC))
}
   261  
// sftpProtocolVersion is the only protocol version this client speaks.
const sftpProtocolVersion = 3 // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt

// sendInit sends the SSH_FXP_INIT packet that starts version negotiation
// with the server.
func (c *Client) sendInit() error {
	return c.clientConn.conn.sendPacket(&sshFxInitPacket{
		Version: sftpProtocolVersion, // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt
	})
}
   269  
// nextID returns the next value of c.nextid. The increment is atomic, so
// nextID is safe for concurrent use; the counter wraps at the uint32 limit.
func (c *Client) nextID() uint32 {
	return atomic.AddUint32(&c.nextid, 1)
}
   274  
// recvVersion reads the server's reply to our SSH_FXP_INIT packet, verifies
// it is an SSH_FXP_VERSION packet for the protocol version we speak, and
// records any extension pairs the server advertises into c.ext.
func (c *Client) recvVersion() error {
	typ, data, err := c.recvPacket(0)
	if err != nil {
		if err == io.EOF {
			// EOF here means the server hung up before negotiating; surface
			// it as an unexpected EOF so callers see a meaningful error.
			return fmt.Errorf("server unexpectedly closed connection: %w", io.ErrUnexpectedEOF)
		}

		return err
	}

	if typ != sshFxpVersion {
		return &unexpectedPacketErr{sshFxpVersion, typ}
	}

	version, data, err := unmarshalUint32Safe(data)
	if err != nil {
		return err
	}

	// Only protocol version 3 is supported; reject anything else.
	if version != sftpProtocolVersion {
		return &unexpectedVersionErr{sftpProtocolVersion, version}
	}

	// The remainder of the packet is a sequence of name/data extension pairs.
	for len(data) > 0 {
		var ext extensionPair
		ext, data, err = unmarshalExtensionPair(data)
		if err != nil {
			return err
		}
		c.ext[ext.Name] = ext.Data
	}

	return nil
}
   309  
   310  // HasExtension checks whether the server supports a named extension.
   311  //
   312  // The first return value is the extension data reported by the server
   313  // (typically a version number).
   314  func (c *Client) HasExtension(name string) (string, bool) {
   315  	data, ok := c.ext[name]
   316  	return data, ok
   317  }
   318  
// Walk returns a new Walker rooted at root, using this Client as the
// file system for traversal.
func (c *Client) Walk(root string) *fs.Walker {
	return fs.WalkFS(root, c)
}
   323  
// ReadDir reads the directory named by dirname and returns a list of
// directory entries.
//
// The directory is opened with opendir and its handle closed on return;
// entries are fetched in batches via SSH_FXP_READDIR until the server
// reports end-of-listing with a status packet.
func (c *Client) ReadDir(p string) ([]os.FileInfo, error) {
	handle, err := c.opendir(p)
	if err != nil {
		return nil, err
	}
	defer c.close(handle) // this has to defer earlier than the lock below
	var attrs []os.FileInfo
	var done = false
	for !done {
		id := c.nextID()
		typ, data, err1 := c.sendPacket(nil, &sshFxpReaddirPacket{
			ID:     id,
			Handle: handle,
		})
		if err1 != nil {
			// Record the transport error but break out of the loop, so the
			// deferred close still runs and already-collected entries are
			// returned alongside the error.
			err = err1
			done = true
			break
		}
		switch typ {
		case sshFxpName:
			sid, data := unmarshalUint32(data)
			if sid != id {
				return nil, &unexpectedIDErr{id, sid}
			}
			// Each name packet carries a count followed by
			// (filename, longname, attrs) triples.
			count, data := unmarshalUint32(data)
			for i := uint32(0); i < count; i++ {
				var filename string
				filename, data = unmarshalString(data)
				_, data = unmarshalString(data) // discard longname
				var attr *FileStat
				attr, data = unmarshalAttrs(data)
				// Skip the "." and ".." pseudo-entries.
				if filename == "." || filename == ".." {
					continue
				}
				attrs = append(attrs, fileInfoFromStat(attr, path.Base(filename)))
			}
		case sshFxpStatus:
			// TODO(dfc) scope warning!
			// A status packet terminates the listing. io.EOF is the normal
			// end-of-directory signal and is cleared below.
			err = normaliseError(unmarshalStatus(id, data))
			done = true
		default:
			return nil, unimplementedPacketErr(typ)
		}
	}
	if err == io.EOF {
		err = nil
	}
	return attrs, err
}
   376  
   377  func (c *Client) opendir(path string) (string, error) {
   378  	id := c.nextID()
   379  	typ, data, err := c.sendPacket(nil, &sshFxpOpendirPacket{
   380  		ID:   id,
   381  		Path: path,
   382  	})
   383  	if err != nil {
   384  		return "", err
   385  	}
   386  	switch typ {
   387  	case sshFxpHandle:
   388  		sid, data := unmarshalUint32(data)
   389  		if sid != id {
   390  			return "", &unexpectedIDErr{id, sid}
   391  		}
   392  		handle, _ := unmarshalString(data)
   393  		return handle, nil
   394  	case sshFxpStatus:
   395  		return "", normaliseError(unmarshalStatus(id, data))
   396  	default:
   397  		return "", unimplementedPacketErr(typ)
   398  	}
   399  }
   400  
   401  // Stat returns a FileInfo structure describing the file specified by path 'p'.
   402  // If 'p' is a symbolic link, the returned FileInfo structure describes the referent file.
   403  func (c *Client) Stat(p string) (os.FileInfo, error) {
   404  	fs, err := c.stat(p)
   405  	if err != nil {
   406  		return nil, err
   407  	}
   408  	return fileInfoFromStat(fs, path.Base(p)), nil
   409  }
   410  
   411  // Lstat returns a FileInfo structure describing the file specified by path 'p'.
   412  // If 'p' is a symbolic link, the returned FileInfo structure describes the symbolic link.
   413  func (c *Client) Lstat(p string) (os.FileInfo, error) {
   414  	id := c.nextID()
   415  	typ, data, err := c.sendPacket(nil, &sshFxpLstatPacket{
   416  		ID:   id,
   417  		Path: p,
   418  	})
   419  	if err != nil {
   420  		return nil, err
   421  	}
   422  	switch typ {
   423  	case sshFxpAttrs:
   424  		sid, data := unmarshalUint32(data)
   425  		if sid != id {
   426  			return nil, &unexpectedIDErr{id, sid}
   427  		}
   428  		attr, _ := unmarshalAttrs(data)
   429  		return fileInfoFromStat(attr, path.Base(p)), nil
   430  	case sshFxpStatus:
   431  		return nil, normaliseError(unmarshalStatus(id, data))
   432  	default:
   433  		return nil, unimplementedPacketErr(typ)
   434  	}
   435  }
   436  
   437  // ReadLink reads the target of a symbolic link.
   438  func (c *Client) ReadLink(p string) (string, error) {
   439  	id := c.nextID()
   440  	typ, data, err := c.sendPacket(nil, &sshFxpReadlinkPacket{
   441  		ID:   id,
   442  		Path: p,
   443  	})
   444  	if err != nil {
   445  		return "", err
   446  	}
   447  	switch typ {
   448  	case sshFxpName:
   449  		sid, data := unmarshalUint32(data)
   450  		if sid != id {
   451  			return "", &unexpectedIDErr{id, sid}
   452  		}
   453  		count, data := unmarshalUint32(data)
   454  		if count != 1 {
   455  			return "", unexpectedCount(1, count)
   456  		}
   457  		filename, _ := unmarshalString(data) // ignore dummy attributes
   458  		return filename, nil
   459  	case sshFxpStatus:
   460  		return "", normaliseError(unmarshalStatus(id, data))
   461  	default:
   462  		return "", unimplementedPacketErr(typ)
   463  	}
   464  }
   465  
   466  // Link creates a hard link at 'newname', pointing at the same inode as 'oldname'
   467  func (c *Client) Link(oldname, newname string) error {
   468  	id := c.nextID()
   469  	typ, data, err := c.sendPacket(nil, &sshFxpHardlinkPacket{
   470  		ID:      id,
   471  		Oldpath: oldname,
   472  		Newpath: newname,
   473  	})
   474  	if err != nil {
   475  		return err
   476  	}
   477  	switch typ {
   478  	case sshFxpStatus:
   479  		return normaliseError(unmarshalStatus(id, data))
   480  	default:
   481  		return unimplementedPacketErr(typ)
   482  	}
   483  }
   484  
   485  // Symlink creates a symbolic link at 'newname', pointing at target 'oldname'
   486  func (c *Client) Symlink(oldname, newname string) error {
   487  	id := c.nextID()
   488  	typ, data, err := c.sendPacket(nil, &sshFxpSymlinkPacket{
   489  		ID:         id,
   490  		Linkpath:   newname,
   491  		Targetpath: oldname,
   492  	})
   493  	if err != nil {
   494  		return err
   495  	}
   496  	switch typ {
   497  	case sshFxpStatus:
   498  		return normaliseError(unmarshalStatus(id, data))
   499  	default:
   500  		return unimplementedPacketErr(typ)
   501  	}
   502  }
   503  
   504  func (c *Client) setfstat(handle string, flags uint32, attrs interface{}) error {
   505  	id := c.nextID()
   506  	typ, data, err := c.sendPacket(nil, &sshFxpFsetstatPacket{
   507  		ID:     id,
   508  		Handle: handle,
   509  		Flags:  flags,
   510  		Attrs:  attrs,
   511  	})
   512  	if err != nil {
   513  		return err
   514  	}
   515  	switch typ {
   516  	case sshFxpStatus:
   517  		return normaliseError(unmarshalStatus(id, data))
   518  	default:
   519  		return unimplementedPacketErr(typ)
   520  	}
   521  }
   522  
   523  // setstat is a convience wrapper to allow for changing of various parts of the file descriptor.
   524  func (c *Client) setstat(path string, flags uint32, attrs interface{}) error {
   525  	id := c.nextID()
   526  	typ, data, err := c.sendPacket(nil, &sshFxpSetstatPacket{
   527  		ID:    id,
   528  		Path:  path,
   529  		Flags: flags,
   530  		Attrs: attrs,
   531  	})
   532  	if err != nil {
   533  		return err
   534  	}
   535  	switch typ {
   536  	case sshFxpStatus:
   537  		return normaliseError(unmarshalStatus(id, data))
   538  	default:
   539  		return unimplementedPacketErr(typ)
   540  	}
   541  }
   542  
   543  // Chtimes changes the access and modification times of the named file.
   544  func (c *Client) Chtimes(path string, atime time.Time, mtime time.Time) error {
   545  	type times struct {
   546  		Atime uint32
   547  		Mtime uint32
   548  	}
   549  	attrs := times{uint32(atime.Unix()), uint32(mtime.Unix())}
   550  	return c.setstat(path, sshFileXferAttrACmodTime, attrs)
   551  }
   552  
   553  // Chown changes the user and group owners of the named file.
   554  func (c *Client) Chown(path string, uid, gid int) error {
   555  	type owner struct {
   556  		UID uint32
   557  		GID uint32
   558  	}
   559  	attrs := owner{uint32(uid), uint32(gid)}
   560  	return c.setstat(path, sshFileXferAttrUIDGID, attrs)
   561  }
   562  
// Chmod changes the permissions of the named file.
//
// Chmod does not apply a umask, because even retrieving the umask is not
// possible in a portable way without causing a race condition. Callers
// should mask off umask bits, if desired.
func (c *Client) Chmod(path string, mode os.FileMode) error {
	// toChmodPerm translates the os.FileMode into the wire permission bits.
	return c.setstat(path, sshFileXferAttrPermissions, toChmodPerm(mode))
}
   571  
// Truncate sets the size of the named file. Although it may be safely assumed
// that if the size is less than its current size it will be truncated to fit,
// the SFTP protocol does not specify what behavior the server should do when setting
// size greater than the current size.
func (c *Client) Truncate(path string, size int64) error {
	return c.setstat(path, sshFileXferAttrSize, uint64(size))
}
   579  
// Open opens the named file for reading. If successful, methods on the
// returned file can be used for reading; the associated file descriptor
// has mode O_RDONLY.
func (c *Client) Open(path string) (*File, error) {
	return c.open(path, flags(os.O_RDONLY))
}
   586  
// OpenFile is the generalized open call; most users will use Open or
// Create instead. It opens the named file with specified flag (O_RDONLY
// etc.). If successful, methods on the returned File can be used for I/O.
func (c *Client) OpenFile(path string, f int) (*File, error) {
	// flags() translates os.OpenFile-style flags into SFTP pflags.
	return c.open(path, flags(f))
}
   593  
   594  func (c *Client) open(path string, pflags uint32) (*File, error) {
   595  	id := c.nextID()
   596  	typ, data, err := c.sendPacket(nil, &sshFxpOpenPacket{
   597  		ID:     id,
   598  		Path:   path,
   599  		Pflags: pflags,
   600  	})
   601  	if err != nil {
   602  		return nil, err
   603  	}
   604  	switch typ {
   605  	case sshFxpHandle:
   606  		sid, data := unmarshalUint32(data)
   607  		if sid != id {
   608  			return nil, &unexpectedIDErr{id, sid}
   609  		}
   610  		handle, _ := unmarshalString(data)
   611  		return &File{c: c, path: path, handle: handle}, nil
   612  	case sshFxpStatus:
   613  		return nil, normaliseError(unmarshalStatus(id, data))
   614  	default:
   615  		return nil, unimplementedPacketErr(typ)
   616  	}
   617  }
   618  
   619  // close closes a handle handle previously returned in the response
   620  // to SSH_FXP_OPEN or SSH_FXP_OPENDIR. The handle becomes invalid
   621  // immediately after this request has been sent.
   622  func (c *Client) close(handle string) error {
   623  	id := c.nextID()
   624  	typ, data, err := c.sendPacket(nil, &sshFxpClosePacket{
   625  		ID:     id,
   626  		Handle: handle,
   627  	})
   628  	if err != nil {
   629  		return err
   630  	}
   631  	switch typ {
   632  	case sshFxpStatus:
   633  		return normaliseError(unmarshalStatus(id, data))
   634  	default:
   635  		return unimplementedPacketErr(typ)
   636  	}
   637  }
   638  
   639  func (c *Client) stat(path string) (*FileStat, error) {
   640  	id := c.nextID()
   641  	typ, data, err := c.sendPacket(nil, &sshFxpStatPacket{
   642  		ID:   id,
   643  		Path: path,
   644  	})
   645  	if err != nil {
   646  		return nil, err
   647  	}
   648  	switch typ {
   649  	case sshFxpAttrs:
   650  		sid, data := unmarshalUint32(data)
   651  		if sid != id {
   652  			return nil, &unexpectedIDErr{id, sid}
   653  		}
   654  		attr, _ := unmarshalAttrs(data)
   655  		return attr, nil
   656  	case sshFxpStatus:
   657  		return nil, normaliseError(unmarshalStatus(id, data))
   658  	default:
   659  		return nil, unimplementedPacketErr(typ)
   660  	}
   661  }
   662  
   663  func (c *Client) fstat(handle string) (*FileStat, error) {
   664  	id := c.nextID()
   665  	typ, data, err := c.sendPacket(nil, &sshFxpFstatPacket{
   666  		ID:     id,
   667  		Handle: handle,
   668  	})
   669  	if err != nil {
   670  		return nil, err
   671  	}
   672  	switch typ {
   673  	case sshFxpAttrs:
   674  		sid, data := unmarshalUint32(data)
   675  		if sid != id {
   676  			return nil, &unexpectedIDErr{id, sid}
   677  		}
   678  		attr, _ := unmarshalAttrs(data)
   679  		return attr, nil
   680  	case sshFxpStatus:
   681  		return nil, normaliseError(unmarshalStatus(id, data))
   682  	default:
   683  		return nil, unimplementedPacketErr(typ)
   684  	}
   685  }
   686  
   687  // StatVFS retrieves VFS statistics from a remote host.
   688  //
   689  // It implements the statvfs@openssh.com SSH_FXP_EXTENDED feature
   690  // from http://www.opensource.apple.com/source/OpenSSH/OpenSSH-175/openssh/PROTOCOL?txt.
   691  func (c *Client) StatVFS(path string) (*StatVFS, error) {
   692  	// send the StatVFS packet to the server
   693  	id := c.nextID()
   694  	typ, data, err := c.sendPacket(nil, &sshFxpStatvfsPacket{
   695  		ID:   id,
   696  		Path: path,
   697  	})
   698  	if err != nil {
   699  		return nil, err
   700  	}
   701  
   702  	switch typ {
   703  	// server responded with valid data
   704  	case sshFxpExtendedReply:
   705  		var response StatVFS
   706  		err = binary.Read(bytes.NewReader(data), binary.BigEndian, &response)
   707  		if err != nil {
   708  			return nil, errors.New("can not parse reply")
   709  		}
   710  
   711  		return &response, nil
   712  
   713  	// the resquest failed
   714  	case sshFxpStatus:
   715  		return nil, normaliseError(unmarshalStatus(id, data))
   716  
   717  	default:
   718  		return nil, unimplementedPacketErr(typ)
   719  	}
   720  }
   721  
// Join joins any number of path elements into a single path, adding a
// separating slash if necessary. The result is Cleaned; in particular, all
// empty strings are ignored. It delegates to path.Join, as SFTP paths are
// always slash-separated.
func (c *Client) Join(elem ...string) string { return path.Join(elem...) }
   726  
// Remove removes the specified file or directory. An error will be returned if no
// file or directory with the specified path exists, or if the specified directory
// is not empty.
func (c *Client) Remove(path string) error {
	// Try a plain file removal first; fall back to directory removal on the
	// status codes (or the permission error) various servers use for
	// "that was a directory".
	err := c.removeFile(path)
	// some servers, *cough* osx *cough*, return EPERM, not ENODIR.
	// serv-u returns ssh_FX_FILE_IS_A_DIRECTORY
	// EPERM is converted to os.ErrPermission so it is not a StatusError
	if err, ok := err.(*StatusError); ok {
		switch err.Code {
		case sshFxFailure, sshFxFileIsADirectory:
			return c.RemoveDirectory(path)
		}
	}
	if os.IsPermission(err) {
		return c.RemoveDirectory(path)
	}
	return err
}
   746  
   747  func (c *Client) removeFile(path string) error {
   748  	id := c.nextID()
   749  	typ, data, err := c.sendPacket(nil, &sshFxpRemovePacket{
   750  		ID:       id,
   751  		Filename: path,
   752  	})
   753  	if err != nil {
   754  		return err
   755  	}
   756  	switch typ {
   757  	case sshFxpStatus:
   758  		return normaliseError(unmarshalStatus(id, data))
   759  	default:
   760  		return unimplementedPacketErr(typ)
   761  	}
   762  }
   763  
   764  // RemoveDirectory removes a directory path.
   765  func (c *Client) RemoveDirectory(path string) error {
   766  	id := c.nextID()
   767  	typ, data, err := c.sendPacket(nil, &sshFxpRmdirPacket{
   768  		ID:   id,
   769  		Path: path,
   770  	})
   771  	if err != nil {
   772  		return err
   773  	}
   774  	switch typ {
   775  	case sshFxpStatus:
   776  		return normaliseError(unmarshalStatus(id, data))
   777  	default:
   778  		return unimplementedPacketErr(typ)
   779  	}
   780  }
   781  
   782  // Rename renames a file.
   783  func (c *Client) Rename(oldname, newname string) error {
   784  	id := c.nextID()
   785  	typ, data, err := c.sendPacket(nil, &sshFxpRenamePacket{
   786  		ID:      id,
   787  		Oldpath: oldname,
   788  		Newpath: newname,
   789  	})
   790  	if err != nil {
   791  		return err
   792  	}
   793  	switch typ {
   794  	case sshFxpStatus:
   795  		return normaliseError(unmarshalStatus(id, data))
   796  	default:
   797  		return unimplementedPacketErr(typ)
   798  	}
   799  }
   800  
   801  // PosixRename renames a file using the posix-rename@openssh.com extension
   802  // which will replace newname if it already exists.
   803  func (c *Client) PosixRename(oldname, newname string) error {
   804  	id := c.nextID()
   805  	typ, data, err := c.sendPacket(nil, &sshFxpPosixRenamePacket{
   806  		ID:      id,
   807  		Oldpath: oldname,
   808  		Newpath: newname,
   809  	})
   810  	if err != nil {
   811  		return err
   812  	}
   813  	switch typ {
   814  	case sshFxpStatus:
   815  		return normaliseError(unmarshalStatus(id, data))
   816  	default:
   817  		return unimplementedPacketErr(typ)
   818  	}
   819  }
   820  
   821  // RealPath can be used to have the server canonicalize any given path name to an absolute path.
   822  //
   823  // This is useful for converting path names containing ".." components,
   824  // or relative pathnames without a leading slash into absolute paths.
   825  func (c *Client) RealPath(path string) (string, error) {
   826  	id := c.nextID()
   827  	typ, data, err := c.sendPacket(nil, &sshFxpRealpathPacket{
   828  		ID:   id,
   829  		Path: path,
   830  	})
   831  	if err != nil {
   832  		return "", err
   833  	}
   834  	switch typ {
   835  	case sshFxpName:
   836  		sid, data := unmarshalUint32(data)
   837  		if sid != id {
   838  			return "", &unexpectedIDErr{id, sid}
   839  		}
   840  		count, data := unmarshalUint32(data)
   841  		if count != 1 {
   842  			return "", unexpectedCount(1, count)
   843  		}
   844  		filename, _ := unmarshalString(data) // ignore attributes
   845  		return filename, nil
   846  	case sshFxpStatus:
   847  		return "", normaliseError(unmarshalStatus(id, data))
   848  	default:
   849  		return "", unimplementedPacketErr(typ)
   850  	}
   851  }
   852  
// Getwd returns the current working directory of the server. Operations
// involving relative paths will be based at this location. It is implemented
// by asking the server to canonicalize ".".
func (c *Client) Getwd() (string, error) {
	return c.RealPath(".")
}
   858  
   859  // Mkdir creates the specified directory. An error will be returned if a file or
   860  // directory with the specified path already exists, or if the directory's
   861  // parent folder does not exist (the method cannot create complete paths).
   862  func (c *Client) Mkdir(path string) error {
   863  	id := c.nextID()
   864  	typ, data, err := c.sendPacket(nil, &sshFxpMkdirPacket{
   865  		ID:   id,
   866  		Path: path,
   867  	})
   868  	if err != nil {
   869  		return err
   870  	}
   871  	switch typ {
   872  	case sshFxpStatus:
   873  		return normaliseError(unmarshalStatus(id, data))
   874  	default:
   875  		return unimplementedPacketErr(typ)
   876  	}
   877  }
   878  
// MkdirAll creates a directory named path, along with any necessary parents,
// and returns nil, or else returns an error.
// If path is already a directory, MkdirAll does nothing and returns nil.
// If path contains a regular file, an error is returned
func (c *Client) MkdirAll(path string) error {
	// Most of this code mimics https://golang.org/src/os/path.go?s=514:561#L13
	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
	dir, err := c.Stat(path)
	if err == nil {
		if dir.IsDir() {
			return nil
		}
		// Path exists but is not a directory.
		return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
	}

	// Slow path: make sure parent exists and then call Mkdir for path.
	i := len(path)
	for i > 0 && path[i-1] == '/' { // Skip trailing path separator.
		i--
	}

	j := i
	for j > 0 && path[j-1] != '/' { // Scan backward over element.
		j--
	}

	if j > 1 {
		// Create parent (path up to, but excluding, the trailing separator).
		err = c.MkdirAll(path[0 : j-1])
		if err != nil {
			return err
		}
	}

	// Parent now exists; invoke Mkdir and use its result.
	err = c.Mkdir(path)
	if err != nil {
		// Handle arguments like "foo/." by
		// double-checking that directory doesn't exist.
		dir, err1 := c.Lstat(path)
		if err1 == nil && dir.IsDir() {
			return nil
		}
		return err
	}
	return nil
}
   926  
   927  // RemoveAll delete files recursively in the directory and Recursively delete subdirectories.
   928  // An error will be returned if no file or directory with the specified path exists
   929  func (c *Client) RemoveAll(path string) error {
   930  
   931  	// Get the file/directory information
   932  	fi, err := c.Stat(path)
   933  	if err != nil {
   934  		return err
   935  	}
   936  
   937  	if fi.IsDir() {
   938  		// Delete files recursively in the directory
   939  		files, err := c.ReadDir(path)
   940  		if err != nil {
   941  			return err
   942  		}
   943  
   944  		for _, file := range files {
   945  			if file.IsDir() {
   946  				// Recursively delete subdirectories
   947  				err = c.RemoveAll(path + "/" + file.Name())
   948  				if err != nil {
   949  					return err
   950  				}
   951  			} else {
   952  				// Delete individual files
   953  				err = c.Remove(path + "/" + file.Name())
   954  				if err != nil {
   955  					return err
   956  				}
   957  			}
   958  		}
   959  
   960  	}
   961  
   962  	return c.Remove(path)
   963  
   964  }
   965  
// File represents a remote file.
type File struct {
	c      *Client // client over which all requests for this file are sent
	path   string  // remote path, as passed to Open or Create
	handle string  // server-assigned handle identifying the open file

	mu     sync.Mutex // guards offset; serializes the offset-based methods
	offset int64      // current offset within remote file
}
   975  
// Close closes the File, rendering it unusable for I/O. It returns an
// error, if any.
func (f *File) Close() error {
	// Release the server-side handle; the File must not be used afterwards.
	return f.c.close(f.handle)
}
   981  
// Name returns the name of the file as presented to Open or Create.
func (f *File) Name() string {
	// The path is captured at open time and never modified afterwards.
	return f.path
}
   986  
   987  // Read reads up to len(b) bytes from the File. It returns the number of bytes
   988  // read and an error, if any. Read follows io.Reader semantics, so when Read
   989  // encounters an error or EOF condition after successfully reading n > 0 bytes,
   990  // it returns the number of bytes read.
   991  //
   992  // To maximise throughput for transferring the entire file (especially
   993  // over high latency links) it is recommended to use WriteTo rather
   994  // than calling Read multiple times. io.Copy will do this
   995  // automatically.
   996  func (f *File) Read(b []byte) (int, error) {
   997  	f.mu.Lock()
   998  	defer f.mu.Unlock()
   999  
  1000  	n, err := f.ReadAt(b, f.offset)
  1001  	f.offset += int64(n)
  1002  	return n, err
  1003  }
  1004  
  1005  // readChunkAt attempts to read the whole entire length of the buffer from the file starting at the offset.
  1006  // It will continue progressively reading into the buffer until it fills the whole buffer, or an error occurs.
  1007  func (f *File) readChunkAt(ch chan result, b []byte, off int64) (n int, err error) {
  1008  	for err == nil && n < len(b) {
  1009  		id := f.c.nextID()
  1010  		typ, data, err := f.c.sendPacket(ch, &sshFxpReadPacket{
  1011  			ID:     id,
  1012  			Handle: f.handle,
  1013  			Offset: uint64(off) + uint64(n),
  1014  			Len:    uint32(len(b) - n),
  1015  		})
  1016  		if err != nil {
  1017  			return n, err
  1018  		}
  1019  
  1020  		switch typ {
  1021  		case sshFxpStatus:
  1022  			return n, normaliseError(unmarshalStatus(id, data))
  1023  
  1024  		case sshFxpData:
  1025  			sid, data := unmarshalUint32(data)
  1026  			if id != sid {
  1027  				return n, &unexpectedIDErr{id, sid}
  1028  			}
  1029  
  1030  			l, data := unmarshalUint32(data)
  1031  			n += copy(b[n:], data[:l])
  1032  
  1033  		default:
  1034  			return n, unimplementedPacketErr(typ)
  1035  		}
  1036  	}
  1037  
  1038  	return
  1039  }
  1040  
  1041  func (f *File) readAtSequential(b []byte, off int64) (read int, err error) {
  1042  	for read < len(b) {
  1043  		rb := b[read:]
  1044  		if len(rb) > f.c.maxPacket {
  1045  			rb = rb[:f.c.maxPacket]
  1046  		}
  1047  		n, err := f.readChunkAt(nil, rb, off+int64(read))
  1048  		if n < 0 {
  1049  			panic("sftp.File: returned negative count from readChunkAt")
  1050  		}
  1051  		if n > 0 {
  1052  			read += n
  1053  		}
  1054  		if err != nil {
  1055  			return read, err
  1056  		}
  1057  	}
  1058  	return read, nil
  1059  }
  1060  
// ReadAt reads up to len(b) byte from the File at a given offset `off`. It returns
// the number of bytes read and an error, if any. ReadAt follows io.ReaderAt semantics,
// so the file offset is not altered during the read.
func (f *File) ReadAt(b []byte, off int64) (int, error) {
	if len(b) <= f.c.maxPacket {
		// This should be able to be serviced with 1/2 requests.
		// So, just do it directly.
		return f.readChunkAt(nil, b, off)
	}

	if f.c.disableConcurrentReads {
		return f.readAtSequential(b, off)
	}

	// Split the read into multiple maxPacket-sized concurrent reads bounded by maxConcurrentRequests.
	// This allows writes with a suitably large buffer to transfer data at a much faster rate
	// by overlapping round trip times.

	// Closed exactly once (by the reduce loop below) to stop the slicer
	// and unblock any workers still waiting to send on workCh/errCh.
	cancel := make(chan struct{})

	concurrency := len(b)/f.c.maxPacket + 1
	if concurrency > f.c.maxConcurrentRequests || concurrency < 1 {
		concurrency = f.c.maxConcurrentRequests
	}

	resPool := newResChanPool(concurrency)

	type work struct {
		id  uint32
		res chan result

		b   []byte
		off int64
	}
	workCh := make(chan work)

	// Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets.
	go func() {
		defer close(workCh)

		b := b
		offset := off
		chunkSize := f.c.maxPacket

		for len(b) > 0 {
			rb := b
			if len(rb) > chunkSize {
				rb = rb[:chunkSize]
			}

			id := f.c.nextID()
			res := resPool.Get()

			// The request is dispatched before handing the work item to a
			// worker, so responses can arrive while workers are busy.
			f.c.dispatchRequest(res, &sshFxpReadPacket{
				ID:     id,
				Handle: f.handle,
				Offset: uint64(offset),
				Len:    uint32(chunkSize),
			})

			select {
			case workCh <- work{id, res, rb, offset}:
			case <-cancel:
				return
			}

			offset += int64(len(rb))
			b = b[len(rb):]
		}
	}()

	type rErr struct {
		off int64
		err error
	}
	errCh := make(chan rErr)

	var wg sync.WaitGroup
	wg.Add(concurrency)
	for i := 0; i < concurrency; i++ {
		// Map_i: each worker gets work, and then performs the Read into its buffer from its respective offset.
		go func() {
			defer wg.Done()

			for packet := range workCh {
				var n int

				s := <-packet.res
				resPool.Put(packet.res)

				err := s.err
				if err == nil {
					switch s.typ {
					case sshFxpStatus:
						// Either a server error, or EOF (normalised to io.EOF).
						err = normaliseError(unmarshalStatus(packet.id, s.data))

					case sshFxpData:
						sid, data := unmarshalUint32(s.data)
						if packet.id != sid {
							err = &unexpectedIDErr{packet.id, sid}

						} else {
							l, data := unmarshalUint32(data)
							n = copy(packet.b, data[:l])

							// For normal disk files, it is guaranteed that this will read
							// the specified number of bytes, or up to end of file.
							// This implies, if we have a short read, that means EOF.
							if n < len(packet.b) {
								err = io.EOF
							}
						}

					default:
						err = unimplementedPacketErr(s.typ)
					}
				}

				if err != nil {
					// return the offset as the start + how much we read before the error.
					errCh <- rErr{packet.off + int64(n), err}
					return
				}
			}
		}()
	}

	// Wait for long tail, before closing results.
	go func() {
		wg.Wait()
		close(errCh)
	}()

	// Reduce: collect all the results into a relevant return: the earliest offset to return an error.
	firstErr := rErr{math.MaxInt64, nil}
	for rErr := range errCh {
		if rErr.off <= firstErr.off {
			firstErr = rErr
		}

		select {
		case <-cancel:
		default:
			// stop any more work from being distributed. (Just in case.)
			close(cancel)
		}
	}

	if firstErr.err != nil {
		// firstErr.err != nil if and only if firstErr.off > our starting offset.
		return int(firstErr.off - off), firstErr.err
	}

	// As per spec for io.ReaderAt, we return nil error if and only if we read everything.
	return len(b), nil
}
  1217  
// writeToSequential implements WriteTo, but works sequentially with no parallelism.
//
// It advances f.offset as it goes, so a partial transfer leaves the file
// positioned just past the last byte successfully handed to w.
func (f *File) writeToSequential(w io.Writer) (written int64, err error) {
	b := make([]byte, f.c.maxPacket)
	ch := make(chan result, 1) // reusable channel

	for {
		n, err := f.readChunkAt(ch, b, f.offset)
		if n < 0 {
			panic("sftp.File: returned negative count from readChunkAt")
		}

		if n > 0 {
			f.offset += int64(n)

			// NOTE: this err deliberately shadows the read error above;
			// the write error is handled here, the read error below.
			m, err := w.Write(b[:n])
			written += int64(m)

			if err != nil {
				return written, err
			}
		}

		if err != nil {
			if err == io.EOF {
				return written, nil // return nil explicitly.
			}

			return written, err
		}
	}
}
  1249  
// WriteTo writes the file to the given Writer.
// The return value is the number of bytes written.
// Any error encountered during the write is also returned.
//
// This method is preferred over calling Read multiple times
// to maximise throughput for transferring the entire file,
// especially over high latency links.
func (f *File) WriteTo(w io.Writer) (written int64, err error) {
	f.mu.Lock()
	defer f.mu.Unlock()

	if f.c.disableConcurrentReads {
		return f.writeToSequential(w)
	}

	// For concurrency, we want to guess how many concurrent workers we should use.
	var fileStat *FileStat
	if f.c.useFstat {
		fileStat, err = f.c.fstat(f.handle)
	} else {
		fileStat, err = f.c.stat(f.path)
	}
	if err != nil {
		return 0, err
	}

	fileSize := fileStat.Size
	if fileSize <= uint64(f.c.maxPacket) || !isRegular(fileStat.Mode) {
		// only regular files are guaranteed to return (full read) xor (partial read, next error)
		return f.writeToSequential(w)
	}

	concurrency64 := fileSize/uint64(f.c.maxPacket) + 1 // a bad guess, but better than no guess
	if concurrency64 > uint64(f.c.maxConcurrentRequests) || concurrency64 < 1 {
		concurrency64 = uint64(f.c.maxConcurrentRequests)
	}
	// Now that concurrency64 is saturated to an int value, we know this assignment cannot possibly overflow.
	concurrency := int(concurrency64)

	chunkSize := f.c.maxPacket
	pool := newBufPool(concurrency, chunkSize)
	resPool := newResChanPool(concurrency)

	cancel := make(chan struct{})
	var wg sync.WaitGroup
	defer func() {
		// Once the writing Reduce phase has ended, all the feed work needs to unconditionally stop.
		close(cancel)

		// We want to wait until all outstanding goroutines with an `f` or `f.c` reference have completed.
		// Just to be sure we don’t orphan any goroutines any hanging references.
		wg.Wait()
	}()

	// Each chunk of work carries a `next` channel; a worker delivers its
	// result on `cur` and the reducer then waits on `next`, which restores
	// the original file order regardless of which worker finishes first.
	type writeWork struct {
		b   []byte
		off int64
		err error

		next chan writeWork
	}
	writeCh := make(chan writeWork)

	type readWork struct {
		id  uint32
		res chan result
		off int64

		cur, next chan writeWork
	}
	readCh := make(chan readWork)

	// Slice: hand out chunks of work on demand, with a `cur` and `next` channel built-in for sequencing.
	go func() {
		defer close(readCh)

		off := f.offset

		cur := writeCh
		for {
			id := f.c.nextID()
			res := resPool.Get()

			next := make(chan writeWork)
			readWork := readWork{
				id:  id,
				res: res,
				off: off,

				cur:  cur,
				next: next,
			}

			f.c.dispatchRequest(res, &sshFxpReadPacket{
				ID:     id,
				Handle: f.handle,
				Offset: uint64(off),
				Len:    uint32(chunkSize),
			})

			select {
			case readCh <- readWork:
			case <-cancel:
				return
			}

			off += int64(chunkSize)
			cur = next
		}
	}()

	wg.Add(concurrency)
	for i := 0; i < concurrency; i++ {
		// Map_i: each worker gets readWork, and does the Read into a buffer at the given offset.
		go func() {
			defer wg.Done()

			for readWork := range readCh {
				var b []byte
				var n int

				s := <-readWork.res
				resPool.Put(readWork.res)

				err := s.err
				if err == nil {
					switch s.typ {
					case sshFxpStatus:
						// Either a server error, or EOF (normalised to io.EOF).
						err = normaliseError(unmarshalStatus(readWork.id, s.data))

					case sshFxpData:
						sid, data := unmarshalUint32(s.data)
						if readWork.id != sid {
							err = &unexpectedIDErr{readWork.id, sid}

						} else {
							l, data := unmarshalUint32(data)
							b = pool.Get()[:l]
							n = copy(b, data[:l])
							b = b[:n]
						}

					default:
						err = unimplementedPacketErr(s.typ)
					}
				}

				writeWork := writeWork{
					b:   b,
					off: readWork.off,
					err: err,

					next: readWork.next,
				}

				// Deliver in file order via this chunk's `cur` channel.
				select {
				case readWork.cur <- writeWork:
				case <-cancel:
					return
				}

				if err != nil {
					return
				}
			}
		}()
	}

	// Reduce: serialize the results from the reads into sequential writes.
	cur := writeCh
	for {
		packet, ok := <-cur
		if !ok {
			return written, errors.New("sftp.File.WriteTo: unexpectedly closed channel")
		}

		// Because writes are serialized, this will always be the last successfully read byte.
		f.offset = packet.off + int64(len(packet.b))

		if len(packet.b) > 0 {
			n, err := w.Write(packet.b)
			written += int64(n)
			if err != nil {
				return written, err
			}
		}

		if packet.err != nil {
			if packet.err == io.EOF {
				return written, nil
			}

			return written, packet.err
		}

		pool.Put(packet.b)
		cur = packet.next
	}
}
  1449  
  1450  // Stat returns the FileInfo structure describing file. If there is an
  1451  // error.
  1452  func (f *File) Stat() (os.FileInfo, error) {
  1453  	fs, err := f.c.fstat(f.handle)
  1454  	if err != nil {
  1455  		return nil, err
  1456  	}
  1457  	return fileInfoFromStat(fs, path.Base(f.path)), nil
  1458  }
  1459  
  1460  // Write writes len(b) bytes to the File. It returns the number of bytes
  1461  // written and an error, if any. Write returns a non-nil error when n !=
  1462  // len(b).
  1463  //
  1464  // To maximise throughput for transferring the entire file (especially
  1465  // over high latency links) it is recommended to use ReadFrom rather
  1466  // than calling Write multiple times. io.Copy will do this
  1467  // automatically.
  1468  func (f *File) Write(b []byte) (int, error) {
  1469  	f.mu.Lock()
  1470  	defer f.mu.Unlock()
  1471  
  1472  	n, err := f.WriteAt(b, f.offset)
  1473  	f.offset += int64(n)
  1474  	return n, err
  1475  }
  1476  
  1477  func (f *File) writeChunkAt(ch chan result, b []byte, off int64) (int, error) {
  1478  	typ, data, err := f.c.sendPacket(ch, &sshFxpWritePacket{
  1479  		ID:     f.c.nextID(),
  1480  		Handle: f.handle,
  1481  		Offset: uint64(off),
  1482  		Length: uint32(len(b)),
  1483  		Data:   b,
  1484  	})
  1485  	if err != nil {
  1486  		return 0, err
  1487  	}
  1488  
  1489  	switch typ {
  1490  	case sshFxpStatus:
  1491  		id, _ := unmarshalUint32(data)
  1492  		err := normaliseError(unmarshalStatus(id, data))
  1493  		if err != nil {
  1494  			return 0, err
  1495  		}
  1496  
  1497  	default:
  1498  		return 0, unimplementedPacketErr(typ)
  1499  	}
  1500  
  1501  	return len(b), nil
  1502  }
  1503  
// writeAtConcurrent implements WriterAt, but works concurrently rather than sequentially.
func (f *File) writeAtConcurrent(b []byte, off int64) (int, error) {
	// Split the write into multiple maxPacket sized concurrent writes
	// bounded by maxConcurrentRequests. This allows writes with a suitably
	// large buffer to transfer data at a much faster rate due to
	// overlapping round trip times.

	// Closed exactly once (by the reduce loop below) to stop the slicer.
	cancel := make(chan struct{})

	type work struct {
		id  uint32
		res chan result

		off int64
	}
	workCh := make(chan work)

	concurrency := len(b)/f.c.maxPacket + 1
	if concurrency > f.c.maxConcurrentRequests || concurrency < 1 {
		concurrency = f.c.maxConcurrentRequests
	}

	pool := newResChanPool(concurrency)

	// Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets.
	go func() {
		defer close(workCh)

		var read int
		chunkSize := f.c.maxPacket

		for read < len(b) {
			wb := b[read:]
			if len(wb) > chunkSize {
				wb = wb[:chunkSize]
			}

			id := f.c.nextID()
			res := pool.Get()
			off := off + int64(read)

			// The write is dispatched before handing the work item to a
			// worker, so responses can arrive while workers are busy.
			f.c.dispatchRequest(res, &sshFxpWritePacket{
				ID:     id,
				Handle: f.handle,
				Offset: uint64(off),
				Length: uint32(len(wb)),
				Data:   wb,
			})

			select {
			case workCh <- work{id, res, off}:
			case <-cancel:
				return
			}

			read += len(wb)
		}
	}()

	type wErr struct {
		off int64
		err error
	}
	errCh := make(chan wErr)

	var wg sync.WaitGroup
	wg.Add(concurrency)
	for i := 0; i < concurrency; i++ {
		// Map_i: each worker gets work, and does the Write from each buffer to its respective offset.
		go func() {
			defer wg.Done()

			for work := range workCh {
				s := <-work.res
				pool.Put(work.res)

				err := s.err
				if err == nil {
					switch s.typ {
					case sshFxpStatus:
						err = normaliseError(unmarshalStatus(work.id, s.data))
					default:
						err = unimplementedPacketErr(s.typ)
					}
				}

				if err != nil {
					errCh <- wErr{work.off, err}
				}
			}
		}()
	}

	// Wait for long tail, before closing results.
	go func() {
		wg.Wait()
		close(errCh)
	}()

	// Reduce: collect all the results into a relevant return: the earliest offset to return an error.
	firstErr := wErr{math.MaxInt64, nil}
	for wErr := range errCh {
		if wErr.off <= firstErr.off {
			firstErr = wErr
		}

		select {
		case <-cancel:
		default:
			// stop any more work from being distributed. (Just in case.)
			close(cancel)
		}
	}

	if firstErr.err != nil {
		// firstErr.err != nil if and only if firstErr.off >= our starting offset.
		return int(firstErr.off - off), firstErr.err
	}

	return len(b), nil
}
  1625  
  1626  // WriteAt writes up to len(b) byte to the File at a given offset `off`. It returns
  1627  // the number of bytes written and an error, if any. WriteAt follows io.WriterAt semantics,
  1628  // so the file offset is not altered during the write.
  1629  func (f *File) WriteAt(b []byte, off int64) (written int, err error) {
  1630  	if len(b) <= f.c.maxPacket {
  1631  		// We can do this in one write.
  1632  		return f.writeChunkAt(nil, b, off)
  1633  	}
  1634  
  1635  	if f.c.useConcurrentWrites {
  1636  		return f.writeAtConcurrent(b, off)
  1637  	}
  1638  
  1639  	ch := make(chan result, 1) // reusable channel
  1640  
  1641  	chunkSize := f.c.maxPacket
  1642  
  1643  	for written < len(b) {
  1644  		wb := b[written:]
  1645  		if len(wb) > chunkSize {
  1646  			wb = wb[:chunkSize]
  1647  		}
  1648  
  1649  		n, err := f.writeChunkAt(ch, wb, off+int64(written))
  1650  		if n > 0 {
  1651  			written += n
  1652  		}
  1653  
  1654  		if err != nil {
  1655  			return written, err
  1656  		}
  1657  	}
  1658  
  1659  	return len(b), nil
  1660  }
  1661  
// ReadFromWithConcurrency implements ReaderFrom,
// but uses the given concurrency to issue multiple requests at the same time.
//
// Giving a concurrency of less than one will default to the Client’s max concurrency.
//
// Otherwise, the given concurrency will be capped by the Client's max concurrency.
func (f *File) ReadFromWithConcurrency(r io.Reader, concurrency int) (read int64, err error) {
	// Split the write into multiple maxPacket sized concurrent writes.
	// This allows writes with a suitably large reader
	// to transfer data at a much faster rate due to overlapping round trip times.

	// Closed exactly once (by the reduce loop below) to stop the reader goroutine.
	cancel := make(chan struct{})

	type work struct {
		id  uint32
		res chan result

		off int64
	}
	workCh := make(chan work)

	type rwErr struct {
		off int64
		err error
	}
	errCh := make(chan rwErr)

	if concurrency > f.c.maxConcurrentRequests || concurrency < 1 {
		concurrency = f.c.maxConcurrentRequests
	}

	pool := newResChanPool(concurrency)

	// Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets.
	go func() {
		defer close(workCh)

		b := make([]byte, f.c.maxPacket)
		off := f.offset

		for {
			n, err := r.Read(b)

			if n > 0 {
				// read is the named return; it accumulates even if a
				// write later fails (io.ReaderFrom reports bytes read).
				read += int64(n)

				id := f.c.nextID()
				res := pool.Get()

				f.c.dispatchRequest(res, &sshFxpWritePacket{
					ID:     id,
					Handle: f.handle,
					Offset: uint64(off),
					Length: uint32(n),
					Data:   b[:n],
				})

				select {
				case workCh <- work{id, res, off}:
				case <-cancel:
					return
				}

				off += int64(n)
			}

			if err != nil {
				if err != io.EOF {
					errCh <- rwErr{off, err}
				}
				return
			}
		}
	}()

	var wg sync.WaitGroup
	wg.Add(concurrency)
	for i := 0; i < concurrency; i++ {
		// Map_i: each worker gets work, and does the Write from each buffer to its respective offset.
		go func() {
			defer wg.Done()

			for work := range workCh {
				s := <-work.res
				pool.Put(work.res)

				err := s.err
				if err == nil {
					switch s.typ {
					case sshFxpStatus:
						err = normaliseError(unmarshalStatus(work.id, s.data))
					default:
						err = unimplementedPacketErr(s.typ)
					}
				}

				if err != nil {
					errCh <- rwErr{work.off, err}
				}
			}
		}()
	}

	// Wait for long tail, before closing results.
	go func() {
		wg.Wait()
		close(errCh)
	}()

	// Reduce: Collect all the results into a relevant return: the earliest offset to return an error.
	firstErr := rwErr{math.MaxInt64, nil}
	for rwErr := range errCh {
		if rwErr.off <= firstErr.off {
			firstErr = rwErr
		}

		select {
		case <-cancel:
		default:
			// stop any more work from being distributed.
			close(cancel)
		}
	}

	if firstErr.err != nil {
		// firstErr.err != nil if and only if firstErr.off is a valid offset.
		//
		// firstErr.off will then be the lesser of:
		// * the offset of the first error from writing,
		// * the last successfully read offset.
		//
		// This could be less than the last successfully written offset,
		// which is the whole reason for the UseConcurrentWrites() ClientOption.
		//
		// Callers are responsible for truncating any SFTP files to a safe length.
		f.offset = firstErr.off

		// ReadFrom is defined to return the read bytes, regardless of any writer errors.
		return read, firstErr.err
	}

	f.offset += read
	return read, nil
}
  1806  
// ReadFrom reads data from r until EOF and writes it to the file. The return
// value is the number of bytes read. Any error except io.EOF encountered
// during the read is also returned.
//
// This method is preferred over calling Write multiple times
// to maximise throughput for transferring the entire file,
// especially over high-latency links.
func (f *File) ReadFrom(r io.Reader) (int64, error) {
	f.mu.Lock()
	defer f.mu.Unlock()

	if f.c.useConcurrentWrites {
		// Try to learn how much data is coming, so we can pick a
		// reasonable level of write concurrency.
		var remain int64
		switch r := r.(type) {
		case interface{ Len() int }:
			remain = int64(r.Len())

		case interface{ Size() int64 }:
			remain = r.Size()

		case *io.LimitedReader:
			remain = r.N

		case interface{ Stat() (os.FileInfo, error) }:
			info, err := r.Stat()
			if err == nil {
				remain = info.Size()
			}
		}
		// NOTE: an unknown reader type leaves remain == 0, which falls
		// through to the sequential path below.

		if remain < 0 {
			// remain can only be negative from one of the cases above
			// (e.g. an io.LimitedReader with negative N).
			// We can strongly assert that we want default max concurrency here.
			return f.ReadFromWithConcurrency(r, f.c.maxConcurrentRequests)
		}

		if remain > int64(f.c.maxPacket) {
			// Otherwise, only use concurrency, if it would be at least two packets.

			// This is the best reasonable guess we can make.
			concurrency64 := remain/int64(f.c.maxPacket) + 1

			// We need to cap this value to an `int` size value to avoid overflow on 32-bit machines.
			// So, we may as well pre-cap it to `f.c.maxConcurrentRequests`.
			if concurrency64 > int64(f.c.maxConcurrentRequests) {
				concurrency64 = int64(f.c.maxConcurrentRequests)
			}

			return f.ReadFromWithConcurrency(r, int(concurrency64))
		}
	}

	// Sequential path: read a packet's worth at a time and write it out.
	ch := make(chan result, 1) // reusable channel

	b := make([]byte, f.c.maxPacket)

	var read int64
	for {
		n, err := r.Read(b)
		if n < 0 {
			panic("sftp.File: reader returned negative count from Read")
		}

		if n > 0 {
			read += int64(n)

			// Write the chunk; a write error takes precedence only when
			// the read itself succeeded (err == nil).
			m, err2 := f.writeChunkAt(ch, b[:n], f.offset)
			f.offset += int64(m)

			if err == nil {
				err = err2
			}
		}

		if err != nil {
			if err == io.EOF {
				return read, nil // return nil explicitly.
			}

			return read, err
		}
	}
}
  1889  
  1890  // Seek implements io.Seeker by setting the client offset for the next Read or
  1891  // Write. It returns the next offset read. Seeking before or after the end of
  1892  // the file is undefined. Seeking relative to the end calls Stat.
  1893  func (f *File) Seek(offset int64, whence int) (int64, error) {
  1894  	f.mu.Lock()
  1895  	defer f.mu.Unlock()
  1896  
  1897  	switch whence {
  1898  	case io.SeekStart:
  1899  	case io.SeekCurrent:
  1900  		offset += f.offset
  1901  	case io.SeekEnd:
  1902  		fi, err := f.Stat()
  1903  		if err != nil {
  1904  			return f.offset, err
  1905  		}
  1906  		offset += fi.Size()
  1907  	default:
  1908  		return f.offset, unimplementedSeekWhence(whence)
  1909  	}
  1910  
  1911  	if offset < 0 {
  1912  		return f.offset, os.ErrInvalid
  1913  	}
  1914  
  1915  	f.offset = offset
  1916  	return f.offset, nil
  1917  }
  1918  
// Chown changes the uid/gid of the current file.
func (f *File) Chown(uid, gid int) error {
	// Delegates to Client.Chown using the file's path rather than its handle.
	return f.c.Chown(f.path, uid, gid)
}
  1923  
// Chmod changes the permissions of the current file.
//
// See Client.Chmod for details.
func (f *File) Chmod(mode os.FileMode) error {
	// Uses the open handle (FSETSTAT) rather than the path, and converts the
	// os.FileMode into SFTP permission bits via toChmodPerm.
	return f.c.setfstat(f.handle, sshFileXferAttrPermissions, toChmodPerm(mode))
}
  1930  
  1931  // Sync requests a flush of the contents of a File to stable storage.
  1932  //
  1933  // Sync requires the server to support the fsync@openssh.com extension.
  1934  func (f *File) Sync() error {
  1935  	id := f.c.nextID()
  1936  	typ, data, err := f.c.sendPacket(nil, &sshFxpFsyncPacket{
  1937  		ID:     id,
  1938  		Handle: f.handle,
  1939  	})
  1940  
  1941  	switch {
  1942  	case err != nil:
  1943  		return err
  1944  	case typ == sshFxpStatus:
  1945  		return normaliseError(unmarshalStatus(id, data))
  1946  	default:
  1947  		return &unexpectedPacketErr{want: sshFxpStatus, got: typ}
  1948  	}
  1949  }
  1950  
// Truncate sets the size of the current file. Although it may be safely assumed
// that if the size is less than its current size it will be truncated to fit,
// the SFTP protocol does not specify what behavior the server should do when setting
// size greater than the current size.
// We send a SSH_FXP_FSETSTAT here since we have a file handle
//
// Note: size is converted to uint64 for the wire format; callers are
// expected to pass a non-negative size.
func (f *File) Truncate(size int64) error {
	return f.c.setfstat(f.handle, sshFileXferAttrSize, uint64(size))
}
  1959  
  1960  // normaliseError normalises an error into a more standard form that can be
  1961  // checked against stdlib errors like io.EOF or os.ErrNotExist.
  1962  func normaliseError(err error) error {
  1963  	switch err := err.(type) {
  1964  	case *StatusError:
  1965  		switch err.Code {
  1966  		case sshFxEOF:
  1967  			return io.EOF
  1968  		case sshFxNoSuchFile:
  1969  			return os.ErrNotExist
  1970  		case sshFxPermissionDenied:
  1971  			return os.ErrPermission
  1972  		case sshFxOk:
  1973  			return nil
  1974  		default:
  1975  			return err
  1976  		}
  1977  	default:
  1978  		return err
  1979  	}
  1980  }
  1981  
  1982  // flags converts the flags passed to OpenFile into ssh flags.
  1983  // Unsupported flags are ignored.
  1984  func flags(f int) uint32 {
  1985  	var out uint32
  1986  	switch f & os.O_WRONLY {
  1987  	case os.O_WRONLY:
  1988  		out |= sshFxfWrite
  1989  	case os.O_RDONLY:
  1990  		out |= sshFxfRead
  1991  	}
  1992  	if f&os.O_RDWR == os.O_RDWR {
  1993  		out |= sshFxfRead | sshFxfWrite
  1994  	}
  1995  	if f&os.O_APPEND == os.O_APPEND {
  1996  		out |= sshFxfAppend
  1997  	}
  1998  	if f&os.O_CREATE == os.O_CREATE {
  1999  		out |= sshFxfCreat
  2000  	}
  2001  	if f&os.O_TRUNC == os.O_TRUNC {
  2002  		out |= sshFxfTrunc
  2003  	}
  2004  	if f&os.O_EXCL == os.O_EXCL {
  2005  		out |= sshFxfExcl
  2006  	}
  2007  	return out
  2008  }
  2009  
  2010  // toChmodPerm converts Go permission bits to POSIX permission bits.
  2011  //
  2012  // This differs from fromFileMode in that we preserve the POSIX versions of
  2013  // setuid, setgid and sticky in m, because we've historically supported those
  2014  // bits, and we mask off any non-permission bits.
  2015  func toChmodPerm(m os.FileMode) (perm uint32) {
  2016  	const mask = os.ModePerm | s_ISUID | s_ISGID | s_ISVTX
  2017  	perm = uint32(m & mask)
  2018  
  2019  	if m&os.ModeSetuid != 0 {
  2020  		perm |= s_ISUID
  2021  	}
  2022  	if m&os.ModeSetgid != 0 {
  2023  		perm |= s_ISGID
  2024  	}
  2025  	if m&os.ModeSticky != 0 {
  2026  		perm |= s_ISVTX
  2027  	}
  2028  
  2029  	return perm
  2030  }