github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/pkg/sentry/kernel/kcov.go

// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kernel

import (
	"fmt"
	"io"
	"sync"

	"github.com/SagerNet/gvisor/pkg/abi/linux"
	"github.com/SagerNet/gvisor/pkg/context"
	"github.com/SagerNet/gvisor/pkg/coverage"
	"github.com/SagerNet/gvisor/pkg/errors/linuxerr"
	"github.com/SagerNet/gvisor/pkg/hostarch"
	"github.com/SagerNet/gvisor/pkg/safemem"
	"github.com/SagerNet/gvisor/pkg/sentry/memmap"
	"github.com/SagerNet/gvisor/pkg/sentry/mm"
	"github.com/SagerNet/gvisor/pkg/sentry/pgalloc"
	"github.com/SagerNet/gvisor/pkg/sentry/usage"
)

// kcovAreaSizeMax is the maximum number of uint64 entries allowed in the kcov
// area. On Linux, the maximum is INT_MAX / 8.
const kcovAreaSizeMax = 10 * 1024 * 1024

// Kcov provides kernel coverage data to userspace through a memory-mapped
// region, as kcov does in Linux.
//
// To give the illusion that the data is always up to date, we update the shared
// memory every time before we return to userspace.
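//
// The expected usage mirrors Linux kcov: userspace issues KCOV_INIT_TRACE with
// a size in uint64 entries, mmaps size*8 bytes of the kcov fd (ConfigureMMap),
// and then enables tracing (EnableTrace). The first uint64 of the mapping holds
// the number of PCs recorded so far, and the PCs themselves follow it. A rough
// consumer sketch (illustrative only; mmapKcov, enableKcov, and processPC are
// hypothetical helpers, not part of this package):
//
//	cover := mmapKcov(fd, size*8) // shared area viewed as []uint64
//	enableKcov(fd, KCOV_TRACE_PC)
//	runCodeUnderTest()
//	for i := uint64(0); i < cover[0]; i++ {
//		processPC(cover[1+i])
//	}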
type Kcov struct {
	// mfp provides application memory. It is immutable after creation.
	mfp pgalloc.MemoryFileProvider

	// mu protects all of the fields below.
	mu sync.RWMutex

	// mode is the current kcov mode.
	mode uint8

	// size is the size of the mapping through which the kernel conveys coverage
	// information to userspace.
	size uint64

	// owningTask is the task that currently owns coverage data on the system. The
	// interface for kcov essentially requires that coverage is only going to a
	// single task. Note that kcov should only generate coverage data for the
	// owning task, but we currently generate global coverage.
	owningTask *Task

	// count is a locally cached version of the first uint64 in the kcov data,
	// which is the number of subsequent entries representing PCs.
	//
	// It is used with Kcov.countBlock() to copy the first element of the actual
	// data in and out efficiently, avoid boilerplate, and prevent temporary
	// counts from accidentally escaping to the heap.
	count uint64

	mappable *mm.SpecialMappable
}

// NewKcov creates and returns a Kcov instance.
func (k *Kernel) NewKcov() *Kcov {
	return &Kcov{
		mfp: k,
	}
}

var coveragePool = sync.Pool{
	New: func() interface{} {
		return make([]byte, 0)
	},
}

// TaskWork implements TaskWorker.TaskWork.
func (kcov *Kcov) TaskWork(t *Task) {
	kcov.mu.Lock()
	defer kcov.mu.Unlock()

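	// Coverage collection may have been disabled (via DisableTrace, Clear, or
	// OnTaskExit) since this work item was registered; if so, stop here and do
	// not re-register.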
	if kcov.mode != linux.KCOV_MODE_TRACE_PC {
		return
	}

	rw := &kcovReadWriter{
		mf: kcov.mfp.MemoryFile(),
		fr: kcov.mappable.FileRange(),
	}

	// Read in the PC count.
	if _, err := safemem.ReadFullToBlocks(rw, kcov.countBlock()); err != nil {
		panic(fmt.Sprintf("Internal error reading count from kcov area: %v", err))
	}

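	// The kcov area is laid out as one uint64 holding the PC count followed by
	// the PCs themselves, so fresh coverage data is appended at byte offset
	// 8 * (1 + count).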
	rw.off = 8 * (1 + kcov.count)
	n := coverage.ConsumeCoverageData(&kcovIOWriter{rw})

	// Update the PC count based on the number of bytes written. Note that if we
	// reached the end of the kcov area, we may not have written all of the
	// available coverage data.
	kcov.count += uint64(n / 8)
	rw.off = 0
	if _, err := safemem.WriteFullFromBlocks(rw, kcov.countBlock()); err != nil {
		panic(fmt.Sprintf("Internal error writing count to kcov area: %v", err))
	}

	// Re-register for future work.
	t.RegisterWork(kcov)
}

// InitTrace performs the KCOV_INIT_TRACE ioctl.
func (kcov *Kcov) InitTrace(size uint64) error {
	kcov.mu.Lock()
	defer kcov.mu.Unlock()

	if kcov.mode != linux.KCOV_MODE_DISABLED {
		return linuxerr.EBUSY
	}

	// To simplify all the logic around mapping, we require that the length of the
	// shared region is a multiple of the system page size.
	if (8*size)&(hostarch.PageSize-1) != 0 {
		return linuxerr.EINVAL
	}

	// We need space for at least two uint64s: one to hold the PC count and one
	// to hold a single PC.
	if size < 2 || size > kcovAreaSizeMax {
		return linuxerr.EINVAL
	}

	kcov.size = size
	kcov.mode = linux.KCOV_MODE_INIT
	return nil
}

// EnableTrace performs the KCOV_ENABLE_TRACE ioctl.
func (kcov *Kcov) EnableTrace(ctx context.Context, traceKind uint8) error {
	t := TaskFromContext(ctx)
	if t == nil {
		panic("kcovInode.EnableTrace() cannot be used outside of a task goroutine")
	}

	kcov.mu.Lock()
	defer kcov.mu.Unlock()

	// KCOV_ENABLE must be preceded by KCOV_INIT_TRACE and an mmap call.
	if kcov.mode != linux.KCOV_MODE_INIT || kcov.mappable == nil {
		return linuxerr.EINVAL
	}

	switch traceKind {
	case linux.KCOV_TRACE_PC:
		kcov.mode = linux.KCOV_MODE_TRACE_PC
	case linux.KCOV_TRACE_CMP:
		// We do not support KCOV_MODE_TRACE_CMP.
		return linuxerr.ENOTSUP
	default:
		return linuxerr.EINVAL
	}

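	// Coverage data is delivered to at most one task at a time; refuse to take
	// ownership away from another task that is still tracing.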
	if kcov.owningTask != nil && kcov.owningTask != t {
		return linuxerr.EBUSY
	}

	kcov.owningTask = t
	t.SetKcov(kcov)
	t.RegisterWork(kcov)

	// Clear existing coverage data; the task expects to read only coverage data
	// from the time it is activated.
	coverage.ClearCoverageData()
	return nil
}

// DisableTrace performs the KCOV_DISABLE_TRACE ioctl.
func (kcov *Kcov) DisableTrace(ctx context.Context) error {
	kcov.mu.Lock()
	defer kcov.mu.Unlock()

	t := TaskFromContext(ctx)
	if t == nil {
		panic("kcovInode.DisableTrace() cannot be used outside of a task goroutine")
	}

	if t != kcov.owningTask {
		return linuxerr.EINVAL
	}
	kcov.mode = linux.KCOV_MODE_INIT
	kcov.owningTask = nil
	if kcov.mappable != nil {
		kcov.mappable.DecRef(ctx)
		kcov.mappable = nil
	}
	return nil
}

// Clear resets the mode and clears the owning task and memory mapping for kcov.
// It is called when the fd corresponding to kcov is closed. Note that the mode
// needs to be set so that the next call to kcov.TaskWork() will exit early.
func (kcov *Kcov) Clear(ctx context.Context) {
	kcov.mu.Lock()
	kcov.mode = linux.KCOV_MODE_INIT
	kcov.owningTask = nil
	if kcov.mappable != nil {
		kcov.mappable.DecRef(ctx)
		kcov.mappable = nil
	}
	kcov.mu.Unlock()
}

// OnTaskExit is called when the owning task exits. It is similar to
// kcov.Clear(), except the memory mapping is not cleared, so that the same
// mapping can be used in the future if kcov is enabled again by another task.
func (kcov *Kcov) OnTaskExit() {
	kcov.mu.Lock()
	kcov.mode = linux.KCOV_MODE_INIT
	kcov.owningTask = nil
	kcov.mu.Unlock()
}

// ConfigureMMap is called by the vfs.FileDescription for this kcov instance to
// implement vfs.FileDescription.ConfigureMMap.
func (kcov *Kcov) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {
	kcov.mu.Lock()
	defer kcov.mu.Unlock()

	if kcov.mode != linux.KCOV_MODE_INIT {
		return linuxerr.EINVAL
	}

	if kcov.mappable == nil {
		// Set up the kcov area.
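		// kcov.size is a number of uint64 entries, so the backing allocation is
		// size*8 bytes; InitTrace has already checked that this is a multiple of
		// the page size.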
		fr, err := kcov.mfp.MemoryFile().Allocate(kcov.size*8, usage.Anonymous)
		if err != nil {
			return err
		}

		// Get the thread id for the mmap name.
		t := TaskFromContext(ctx)
		if t == nil {
			panic("TaskFromContext returned nil")
		}
		// For convenience, a special mappable is used here. Note that these mappings
		// will look different under /proc/[pid]/maps than they do on Linux.
		kcov.mappable = mm.NewSpecialMappable(fmt.Sprintf("[kcov:%d]", t.ThreadID()), kcov.mfp, fr)
	}
	kcov.mappable.IncRef()
	opts.Mappable = kcov.mappable
	opts.MappingIdentity = kcov.mappable
	return nil
}

// kcovReadWriter implements safemem.Reader and safemem.Writer.
type kcovReadWriter struct {
	// off is the current byte offset into the kcov area.
	off uint64

	// mf is the memory file backing the kcov area.
	mf *pgalloc.MemoryFile

	// fr is the range within mf that holds the kcov area.
	fr memmap.FileRange
}

// ReadToBlocks implements safemem.Reader.ReadToBlocks.
func (rw *kcovReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
	if dsts.IsEmpty() {
		return 0, nil
	}

	// Limit the read to the kcov range and check for overflow.
	if rw.fr.Length() <= rw.off {
		return 0, io.EOF
	}
	start := rw.fr.Start + rw.off
	end := rw.fr.Start + rw.fr.Length()
	if rend := start + dsts.NumBytes(); rend < end {
		end = rend
	}

	// Get internal mappings.
	bs, err := rw.mf.MapInternal(memmap.FileRange{start, end}, hostarch.Read)
	if err != nil {
		return 0, err
	}

	// Copy from internal mappings.
	n, err := safemem.CopySeq(dsts, bs)
	rw.off += n
	return n, err
}

// WriteFromBlocks implements safemem.Writer.WriteFromBlocks.
func (rw *kcovReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) {
	if srcs.IsEmpty() {
		return 0, nil
	}

	// Limit the write to the kcov area and check for overflow.
	if rw.fr.Length() <= rw.off {
		return 0, io.EOF
	}
	start := rw.fr.Start + rw.off
	end := rw.fr.Start + rw.fr.Length()
	if wend := start + srcs.NumBytes(); wend < end {
		end = wend
	}

	// Get internal mapping.
	bs, err := rw.mf.MapInternal(memmap.FileRange{start, end}, hostarch.Write)
	if err != nil {
		return 0, err
	}

	// Copy to internal mapping.
	n, err := safemem.CopySeq(bs, srcs)
	rw.off += n
	return n, err
}

// kcovIOWriter implements io.Writer as a basic wrapper over kcovReadWriter.
type kcovIOWriter struct {
	rw *kcovReadWriter
}

// Write implements io.Writer.Write.
func (w *kcovIOWriter) Write(p []byte) (int, error) {
	bs := safemem.BlockSeqOf(safemem.BlockFromSafeSlice(p))
	n, err := safemem.WriteFullFromBlocks(w.rw, bs)
	return int(n), err
}