github.com/Psiphon-Labs/psiphon-tunnel-core@v2.0.28+incompatible/psiphon/packetTunnelTransport.go

/*
 * Copyright (c) 2017, Psiphon Inc.
 * All rights reserved.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 */

package psiphon

import (
	"net"
	"sync"
	"sync/atomic"
	"time"

	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors"
	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/monotime"
	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/parameters"
)

// PacketTunnelTransport is an integration layer that presents an io.ReadWriteCloser interface
// to a tun.Client as the transport for relaying packets. The Psiphon client may periodically
// disconnect from and reconnect to the same or different Psiphon servers. PacketTunnelTransport
// allows the Psiphon client to substitute new transport channels on-the-fly.
type PacketTunnelTransport struct {
	// Note: 64-bit ints used with atomic operations are placed
	// at the start of struct to ensure 64-bit alignment.
	// (https://golang.org/pkg/sync/atomic/#pkg-note-BUG)
	readTimeout   int64
	readDeadline  int64
	closed        int32
	workers       *sync.WaitGroup
	readMutex     sync.Mutex
	writeMutex    sync.Mutex
	channelReady  *sync.Cond
	channelMutex  sync.Mutex
	channelConn   net.Conn
	channelTunnel *Tunnel
}

// NewPacketTunnelTransport initializes a PacketTunnelTransport.
func NewPacketTunnelTransport() *PacketTunnelTransport {
	return &PacketTunnelTransport{
		workers:      new(sync.WaitGroup),
		channelReady: sync.NewCond(new(sync.Mutex)),
	}
}
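
// Usage sketch: a minimal illustration of wiring this transport to a packet
// tunnel client. The tunnel value and the tun.Client wiring are illustrative
// assumptions, not prescribed by this file.
//
//	transport := NewPacketTunnelTransport()
//	// ... pass transport to a tun.Client as its io.ReadWriteCloser ...
//	// On each (re)connection, substitute the new tunnel's channel:
//	transport.UseTunnel(tunnel)
//	// On shutdown, interrupt any blocked Read/Write calls:
//	transport.Close()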

// Read implements the io.Reader interface. It uses the current transport channel
// to read packet data, or waits for a new transport channel to be established
// after a failure.
func (p *PacketTunnelTransport) Read(data []byte) (int, error) {

	p.readMutex.Lock()
	defer p.readMutex.Unlock()

	// getChannel will block if there's no channel, or return an error when
	// closed.
	channelConn, channelTunnel, err := p.getChannel()
	if err != nil {
		return 0, errors.Trace(err)
	}

	n, err := channelConn.Read(data)

	if err != nil {

		// This assumes that any error means the channel has failed, which
		// is the case for ssh.Channel reads. io.EOF is not ignored, since
		// a single ssh.Channel may EOF and still get substituted with a new
		// channel.

		p.failedChannel(channelConn, channelTunnel)

	} else {

		// Clear the read deadline now that a read has succeeded.
		// See read deadline comment in Write.
		atomic.StoreInt64(&p.readDeadline, 0)
	}

	return n, errors.Trace(err)
}

// Write implements the io.Writer interface. It uses the current transport channel
// to write packet data, or waits for a new transport channel to be established
// after a failure.
func (p *PacketTunnelTransport) Write(data []byte) (int, error) {

	p.writeMutex.Lock()
	defer p.writeMutex.Unlock()

	// getChannel will block if there's no channel, or return an error when
	// closed.
	channelConn, channelTunnel, err := p.getChannel()
	if err != nil {
		return 0, errors.Trace(err)
	}

	n, err := channelConn.Write(data)

	if err != nil {

		// This assumes that any error means the channel has failed, which
		// is the case for ssh.Channel writes.

		p.failedChannel(channelConn, channelTunnel)

	} else {

		// Set a read deadline: a successful read should occur within the deadline;
		// otherwise an SSH keep alive probe is triggered to check the tunnel
		// status.
		//
		// This scheme mirrors the tunnel dial port forward timeout mechanism
		// present in port forward mode: for any port forwarded connection attempt,
		// if there's a timeout before receiving a response from the server, an SSH
		// keep alive probe is triggered to check the tunnel state. Unlike port
		// forward mode, packet tunnel doesn't track tunneled connections (flows).
		//
		// Here, we deploy a heuristic based on the observation that, for most
		// traffic, a packet sent from the client -- a PacketTunnelTransport.Write
		// -- is followed by a packet received from the server -- a
		// PacketTunnelTransport.Read. For example, a UDP DNS request followed by a
		// response; or a TCP handshake sequence. The heuristic is to trigger an SSH
		// keep alive probe when there is no Read within the timeout period after a
		// Write. Any Read is sufficient to satisfy the deadline.
		//
		// To limit performance impact, we do not use (and continuously reset) a
		// time.Timer; instead we record the deadline upon a successful Write and
		// check any set deadline during subsequent Writes. For the same reason, we
		// do not use a time.Ticker to check the deadline. This means that this
		// scheme depends on the host continuing to attempt to send packets in order
		// to trigger the SSH keep alive.
		//
		// Access to readDeadline/readTimeout is not intended to be completely
		// atomic.

		readDeadline := monotime.Time(atomic.LoadInt64(&p.readDeadline))

		if readDeadline > 0 {

			if monotime.Now().After(readDeadline) {

				select {
				case channelTunnel.signalPortForwardFailure <- struct{}{}:
				default:
				}

				// Clear the deadline now that a probe is triggered.
				atomic.StoreInt64(&p.readDeadline, 0)
			}

			// Keep an existing deadline as set: subsequent write attempts shouldn't
			// extend the deadline.

		} else {

			readTimeout := time.Duration(atomic.LoadInt64(&p.readTimeout))
			readDeadline := monotime.Now().Add(readTimeout)
			atomic.StoreInt64(&p.readDeadline, int64(readDeadline))
		}
	}

	return n, errors.Trace(err)
}

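// To illustrate the read deadline heuristic in Write, a sketch of one
// possible sequence (the 10s timeout is an illustrative value, not a
// parameter default):
//
//	t=0s   Write succeeds; no deadline is set, so readDeadline = now + 10s
//	t=2s   Read succeeds; readDeadline is cleared to 0
//	t=5s   Write succeeds; readDeadline = now + 10s (expires at t=15s)
//	t=8s   Write succeeds; a deadline is already set, so it is kept as-is
//	t=20s  Write succeeds; the deadline has expired with no intervening
//	       Read, so a signalPortForwardFailure probe fires and the
//	       deadline is cleared
//
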
// Close implements the io.Closer interface. Any underlying transport channel is
// closed and any blocking Read/Write calls will be interrupted.
func (p *PacketTunnelTransport) Close() error {

	if !atomic.CompareAndSwapInt32(&p.closed, 0, 1) {
		return nil
	}

	p.workers.Wait()

	// This broadcast is to wake up reads or writes blocking in getChannel; those
	// getChannel calls should then abort on the p.closed check.
	p.channelReady.Broadcast()

	p.channelMutex.Lock()
	if p.channelConn != nil {
		p.channelConn.Close()
		p.channelConn = nil
	}
	p.channelMutex.Unlock()

	return nil
}

// UseTunnel sets the PacketTunnelTransport to use a new transport channel within
// the specified tunnel. UseTunnel does not block on the open channel call; it spawns
// a worker that calls tunnel.DialPacketTunnelChannel and uses the resulting channel.
// UseTunnel has no effect once Close is called.
//
// Note that a blocked tunnel.DialPacketTunnelChannel will block Close;
// callers should arrange for DialPacketTunnelChannel to be interrupted when
// calling Close.
func (p *PacketTunnelTransport) UseTunnel(tunnel *Tunnel) {

	// Don't start a worker when closed, after which workers.Wait may be called.
	if atomic.LoadInt32(&p.closed) == 1 {
		return
	}

	// Spawning a new worker ensures that the latest tunnel is used to dial a
	// new channel without delaying, as might happen if using a single worker
	// that consumes a channel of tunnels.

	p.workers.Add(1)
	go func(tunnel *Tunnel) {
		defer p.workers.Done()

		// channelConn is a net.Conn, since some layering has been applied
		// (e.g., transferstats.Conn). PacketTunnelTransport assumes the
		// channelConn is ultimately an ssh.Channel, which is not a fully
		// functional net.Conn.

		channelConn, err := tunnel.DialPacketTunnelChannel()
		if err != nil {
			// Note: DialPacketTunnelChannel will signal a probe on failure,
			// so it's not necessary to do so here.

			NoticeWarning("dial packet tunnel channel failed: %s", err)
			// TODO: retry?
			return
		}

		p.setChannel(channelConn, tunnel)

	}(tunnel)
}

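// A sketch of how a controller loop might feed successive tunnels to
// UseTunnel across reconnects; the establishedTunnels channel is an
// illustrative stand-in for however the caller learns of new tunnels:
//
//	for tunnel := range establishedTunnels {
//		transport.UseTunnel(tunnel)
//	}
//
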
func (p *PacketTunnelTransport) setChannel(
	channelConn net.Conn, channelTunnel *Tunnel) {

	p.channelMutex.Lock()

	// Concurrency note: this check is within the mutex to ensure that a
	// UseTunnel call concurrent with a Close call doesn't leave a channel
	// set.
	if atomic.LoadInt32(&p.closed) == 1 {
		p.channelMutex.Unlock()
		return
	}

	// Interrupt Read/Write calls blocking on any previous channel.
	if p.channelConn != nil {
		p.channelConn.Close()
	}

	p.channelConn = channelConn
	p.channelTunnel = channelTunnel

	p.channelMutex.Unlock()

	// Initialize the read deadline mechanism using parameters associated with the
	// new tunnel.
	timeout := channelTunnel.config.
		GetParameters().
		GetCustom(channelTunnel.dialParams.NetworkLatencyMultiplier).
		Duration(parameters.PacketTunnelReadTimeout)
	atomic.StoreInt64(&p.readTimeout, int64(timeout))
	atomic.StoreInt64(&p.readDeadline, 0)

	p.channelReady.Broadcast()
}
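
// A worked example of the timeout initialization above, with illustrative
// numbers (not defaults from this codebase) and assuming the latency
// multiplier scales duration parameters multiplicatively: with
// PacketTunnelReadTimeout at 10s and a NetworkLatencyMultiplier of 2.0, the
// stored readTimeout is 20s, so Write triggers a keep alive probe only when
// no Read follows within 20s.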

func (p *PacketTunnelTransport) getChannel() (net.Conn, *Tunnel, error) {

	var channelConn net.Conn
	var channelTunnel *Tunnel

	p.channelReady.L.Lock()
	defer p.channelReady.L.Unlock()
	for {

		if atomic.LoadInt32(&p.closed) == 1 {
			return nil, nil, errors.TraceNew("already closed")
		}

		p.channelMutex.Lock()
		channelConn = p.channelConn
		channelTunnel = p.channelTunnel
		p.channelMutex.Unlock()
		if channelConn != nil {
			break
		}

		p.channelReady.Wait()
	}

	return channelConn, channelTunnel, nil
}
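
// getChannel, setChannel, and Close together follow the standard sync.Cond
// pattern: the waiter re-checks its condition in a loop around Wait, and any
// state change that could satisfy the condition is followed by a Broadcast.
// Schematically:
//
//	c.L.Lock()
//	for !condition() {
//		c.Wait()
//	}
//	// ... condition holds; proceed under c.L ...
//	c.L.Unlock()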

func (p *PacketTunnelTransport) failedChannel(
	channelConn net.Conn, channelTunnel *Tunnel) {

	// In case the channel read/write failed and the tunnel isn't
	// yet in the failed state, trigger a probe.

	select {
	case channelTunnel.signalPortForwardFailure <- struct{}{}:
	default:
	}

	// Clear the current channel. This will cause subsequent Read/Write
	// calls to block in getChannel until a new channel is provided.
	// Concurrency note: must check, within the mutex, that the channelConn
	// is still the one that failed before clearing, since both Read and
	// Write could call failedChannel concurrently.

	p.channelMutex.Lock()
	if p.channelConn == channelConn {
		p.channelConn.Close()
		p.channelConn = nil
		p.channelTunnel = nil
	}
	p.channelMutex.Unlock()

	// Try to establish a new channel within the current tunnel. If this
	// fails, a port forward failure probe will be triggered, which will
	// ultimately trigger an SSH keep alive probe.
	//
	// One case where this is necessary is when the server closes an idle
	// packet tunnel port forward for a live SSH tunnel.

	p.UseTunnel(channelTunnel)
}