github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/pkg/sentry/kernel/pipe/pipe_util.go

// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pipe

import (
	"io"
	"math"

	"github.com/SagerNet/gvisor/pkg/abi/linux"
	"github.com/SagerNet/gvisor/pkg/amutex"
	"github.com/SagerNet/gvisor/pkg/context"
	"github.com/SagerNet/gvisor/pkg/errors/linuxerr"
	"github.com/SagerNet/gvisor/pkg/marshal/primitive"
	"github.com/SagerNet/gvisor/pkg/safemem"
	"github.com/SagerNet/gvisor/pkg/sentry/arch"
	"github.com/SagerNet/gvisor/pkg/sync"
	"github.com/SagerNet/gvisor/pkg/usermem"
	"github.com/SagerNet/gvisor/pkg/waiter"
	"golang.org/x/sys/unix"
)

// This file contains Pipe file functionality that is tied to neither VFS nor
// the old fs architecture.

// Release cleans up the pipe's state.
func (p *Pipe) Release(context.Context) {
	p.rClose()
	p.wClose()

	// Wake up readers and writers.
	p.Notify(waiter.ReadableEvents | waiter.WritableEvents)
}

// Read reads from the Pipe into dst.
func (p *Pipe) Read(ctx context.Context, dst usermem.IOSequence) (int64, error) {
	n, err := dst.CopyOutFrom(ctx, p)
	if n > 0 {
		p.Notify(waiter.WritableEvents)
	}
	return n, err
}

// ReadToBlocks implements safemem.Reader.ReadToBlocks for Pipe.Read.
func (p *Pipe) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
	n, err := p.read(int64(dsts.NumBytes()), func(srcs safemem.BlockSeq) (uint64, error) {
		return safemem.CopySeq(dsts, srcs)
	}, true /* removeFromSrc */)
	return uint64(n), err
}

func (p *Pipe) read(count int64, f func(srcs safemem.BlockSeq) (uint64, error), removeFromSrc bool) (int64, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	n, err := p.peekLocked(count, f)
	if n > 0 && removeFromSrc {
		p.consumeLocked(n)
	}
	return n, err
}

// WriteTo writes to w from the Pipe.
func (p *Pipe) WriteTo(ctx context.Context, w io.Writer, count int64, dup bool) (int64, error) {
	n, err := p.read(count, func(srcs safemem.BlockSeq) (uint64, error) {
		return safemem.FromIOWriter{w}.WriteFromBlocks(srcs)
	}, !dup /* removeFromSrc */)
	if n > 0 && !dup {
		p.Notify(waiter.WritableEvents)
	}
	return n, err
}

// Write writes to the Pipe from src.
func (p *Pipe) Write(ctx context.Context, src usermem.IOSequence) (int64, error) {
	n, err := src.CopyInTo(ctx, p)
	if n > 0 {
		p.Notify(waiter.ReadableEvents)
	}
	if linuxerr.Equals(linuxerr.EPIPE, err) {
		// If we are returning EPIPE, send SIGPIPE to the task.
		if sendSig := linux.SignalNoInfoFuncFromContext(ctx); sendSig != nil {
			sendSig(linux.SIGPIPE)
		}
	}
	return n, err
}

// WriteFromBlocks implements safemem.Writer.WriteFromBlocks for Pipe.Write.
func (p *Pipe) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) {
	n, err := p.write(int64(srcs.NumBytes()), func(dsts safemem.BlockSeq) (uint64, error) {
		return safemem.CopySeq(dsts, srcs)
	})
	return uint64(n), err
}

func (p *Pipe) write(count int64, f func(safemem.BlockSeq) (uint64, error)) (int64, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.writeLocked(count, f)
}

// ReadFrom reads from r to the Pipe.
func (p *Pipe) ReadFrom(ctx context.Context, r io.Reader, count int64) (int64, error) {
	n, err := p.write(count, func(dsts safemem.BlockSeq) (uint64, error) {
		return safemem.FromIOReader{r}.ReadToBlocks(dsts)
	})
	if n > 0 {
		p.Notify(waiter.ReadableEvents)
	}
	return n, err
}

// Readiness returns the ready events in the underlying pipe.
func (p *Pipe) Readiness(mask waiter.EventMask) waiter.EventMask {
	return p.rwReadiness() & mask
}

// Ioctl implements ioctls on the Pipe.
func (p *Pipe) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {
	// Switch on ioctl request.
	switch int(args[1].Int()) {
	case linux.FIONREAD:
		v := p.queued()
		if v > math.MaxInt32 {
			v = math.MaxInt32 // Silently truncate.
		}
		// Copy result to userspace.
		iocc := usermem.IOCopyContext{
			IO:  io,
			Ctx: ctx,
			Opts: usermem.IOOpts{
				AddressSpaceActive: true,
			},
		}
		_, err := primitive.CopyInt32Out(&iocc, args[2].Pointer(), int32(v))
		return 0, err
	default:
		return 0, unix.ENOTTY
	}
}

// waitFor blocks until a new reader or writer on the underlying pipe is
// announced via 'wakeupChan', or until 'sleeper' is cancelled. Any call to this
// function will block for either readers or writers, depending on where
// 'wakeupChan' points.
//
// mu must be held by the caller. waitFor returns with mu held, but it will
// drop mu before blocking for any readers or writers.
// +checklocks:mu
func waitFor(mu *sync.Mutex, wakeupChan *chan struct{}, sleeper amutex.Sleeper) bool {
	// Ideally this function would simply use a condition variable. However, the
	// wait needs to be interruptible via 'sleeper', so we must synchronize via a
	// channel. The synchronization below relies on the fact that closing a
	// channel unblocks all receives on the channel.

	// Does an appropriate wakeup channel already exist? If not, create a new
	// one. This is all done under mu to avoid races.
	if *wakeupChan == nil {
		*wakeupChan = make(chan struct{})
	}

	// Grab a local reference to the wakeup channel since it may disappear as
	// soon as we drop mu.
	wakeup := *wakeupChan

	// Drop the lock and prepare to sleep.
	mu.Unlock()
	cancel := sleeper.SleepStart()

	// Wait for either a new reader/writer to be signalled via 'wakeup', or
	// for the sleep to be cancelled.
	select {
	case <-wakeup:
		sleeper.SleepFinish(true)
	case <-cancel:
		sleeper.SleepFinish(false)
	}

	// Take the lock and check if we were woken. If we were woken and
	// interrupted, the former takes priority.
	mu.Lock()
	select {
	case <-wakeup:
		return true
	default:
		return false
	}
}
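
// The function below is an illustrative sketch, not part of the pipe
// implementation: it shows the close-to-broadcast idiom that waitFor relies
// on, in isolation. Closing a channel unblocks every pending receive on it,
// so a nil-able channel guarded by a mutex acts as a one-shot broadcast. The
// exampleBroadcast name is hypothetical.
func exampleBroadcast() {
	var (
		mu      sync.Mutex
		waiters chan struct{}
	)

	// Register interest under the lock, as waitFor does with *wakeupChan.
	mu.Lock()
	if waiters == nil {
		waiters = make(chan struct{})
	}
	wakeup := waiters // Local copy: waiters may be reset once mu is dropped.
	mu.Unlock()

	done := make(chan struct{})
	go func() {
		<-wakeup // Unblocks as soon as the channel is closed.
		close(done)
	}()

	// Signal by closing the channel, as newHandleLocked does; every receive
	// on wakeup is released at once.
	mu.Lock()
	close(waiters)
	waiters = nil
	mu.Unlock()

	<-done
}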

// newHandleLocked signals a new pipe reader or writer, depending on where
// 'wakeupChan' points. This unblocks any corresponding reader or writer
// waiting for the other end of the pipe to be opened; see waitFor.
//
// Precondition: the mutex protecting wakeupChan must be held.
func newHandleLocked(wakeupChan *chan struct{}) {
	if *wakeupChan != nil {
		close(*wakeupChan)
		*wakeupChan = nil
	}
}
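
// A hedged sketch of how an open()-style caller might pair newHandleLocked
// with waitFor: announce its own end of the FIFO, then block until the
// opposite end appears. This is illustrative only; the exampleFifo type and
// blockingOpenForRead method are hypothetical, and the real open handling
// lives elsewhere in the sentry.
type exampleFifo struct {
	mu      sync.Mutex
	rWakeup chan struct{} // Closed by newHandleLocked when a reader arrives.
	wWakeup chan struct{} // Closed by newHandleLocked when a writer arrives.
}

func (f *exampleFifo) blockingOpenForRead(sleeper amutex.Sleeper) bool {
	f.mu.Lock()
	defer f.mu.Unlock()
	// Wake any writer currently parked in waitFor(&f.mu, &f.rWakeup, ...).
	newHandleLocked(&f.rWakeup)
	// Block until a writer announces itself, or the sleeper is interrupted.
	// A real caller would first check whether a writer already exists and
	// loop; this sketch only shows the blocking step.
	return waitFor(&f.mu, &f.wWakeup, sleeper)
}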