github.com/nicocha30/gvisor-ligolo@v0.0.0-20230726075806-989fa2c0a413/pkg/sentry/kernel/kcov.go

// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kernel

import (
	"fmt"
	"io"
	"sync"

	"github.com/nicocha30/gvisor-ligolo/pkg/abi/linux"
	"github.com/nicocha30/gvisor-ligolo/pkg/context"
	"github.com/nicocha30/gvisor-ligolo/pkg/coverage"
	"github.com/nicocha30/gvisor-ligolo/pkg/errors/linuxerr"
	"github.com/nicocha30/gvisor-ligolo/pkg/hostarch"
	"github.com/nicocha30/gvisor-ligolo/pkg/safemem"
	"github.com/nicocha30/gvisor-ligolo/pkg/sentry/memmap"
	"github.com/nicocha30/gvisor-ligolo/pkg/sentry/mm"
	"github.com/nicocha30/gvisor-ligolo/pkg/sentry/pgalloc"
	"github.com/nicocha30/gvisor-ligolo/pkg/sentry/usage"
)

// kcovAreaSizeMax is the maximum number of uint64 entries allowed in the kcov
// area. On Linux, the maximum is INT_MAX / 8.
const kcovAreaSizeMax = 10 * 1024 * 1024
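
// For scale: 10*1024*1024 entries at 8 bytes each is an 80 MiB area,
// comfortably below Linux's INT_MAX/8 limit of roughly 268 million entries.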

// Kcov provides kernel coverage data to userspace through a memory-mapped
// region, as kcov does in Linux.
//
// To give the illusion that the data is always up to date, we update the
// shared memory every time before we return to userspace.
type Kcov struct {
	// mfp provides application memory. It is immutable after creation.
	mfp pgalloc.MemoryFileProvider

	// mu protects all of the fields below.
	mu sync.RWMutex

	// mode is the current kcov mode.
	mode uint8

	// size is the size of the mapping through which the kernel conveys
	// coverage information to userspace.
	size uint64

	// owningTask is the task that currently owns coverage data on the system.
	// The interface for kcov essentially requires that coverage only goes to a
	// single task. Note that kcov should only generate coverage data for the
	// owning task, but we currently generate global coverage.
	owningTask *Task

	// count is a locally cached version of the first uint64 in the kcov data,
	// which is the number of subsequent entries representing PCs.
	//
	// It is used with kcovInode.countBlock() to copy the first element of the
	// actual data in and out efficiently, avoid boilerplate, and keep the
	// temporary counts from accidentally escaping to the heap.
	count uint64

	// mappable is the special mappable backing the kcov area shared with
	// userspace. It is allocated lazily on the first call to ConfigureMMap.
	mappable *mm.SpecialMappable
}
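
// The mapped area is interpreted by userspace as a uint64 array: element 0
// holds the number of PCs recorded so far, and elements 1..count hold the PCs
// themselves. A rough sketch of how a tracer in the application might read it
// (illustrative only; mmapKcovFD and the atomic load are assumptions, not
// part of this package):
//
//	area := mmapKcovFD()              // []uint64 view of the shared region
//	n := atomic.LoadUint64(&area[0])  // PC count maintained by TaskWork
//	pcs := area[1 : 1+n]              // the recorded program counters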

// NewKcov creates and returns a Kcov instance.
func (k *Kernel) NewKcov() *Kcov {
	return &Kcov{
		mfp: k,
	}
}

var coveragePool = sync.Pool{
	New: func() any {
		return make([]byte, 0)
	},
}

// TaskWork implements TaskWorker.TaskWork.
func (kcov *Kcov) TaskWork(t *Task) {
	kcov.mu.Lock()
	defer kcov.mu.Unlock()

	if kcov.mode != linux.KCOV_MODE_TRACE_PC {
		return
	}

	rw := &kcovReadWriter{
		mf: kcov.mfp.MemoryFile(),
		fr: kcov.mappable.FileRange(),
	}

	// Read in the PC count.
	if _, err := safemem.ReadFullToBlocks(rw, kcov.countBlock()); err != nil {
		panic(fmt.Sprintf("Internal error reading count from kcov area: %v", err))
	}

	rw.off = 8 * (1 + kcov.count)
	n := coverage.ConsumeCoverageData(&kcovIOWriter{rw})

	// Update the PC count based on the number of entries written. Note that if
	// we reached the end of the kcov area, not all of the consumed coverage
	// data may have been written out.
	kcov.count += uint64(n / 8)
	rw.off = 0
	if _, err := safemem.WriteFullFromBlocks(rw, kcov.countBlock()); err != nil {
		panic(fmt.Sprintf("Internal error writing count to kcov area: %v", err))
	}

	// Re-register for future work.
	t.RegisterWork(kcov)
}
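
// As a worked example of the offsets above: if the count word holds 3, the
// writer starts at byte offset 8*(1+3) = 32, immediately after the count and
// the three existing PCs; appending two more 8-byte PCs then brings the count
// to 5.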

// InitTrace performs the KCOV_INIT_TRACE ioctl.
func (kcov *Kcov) InitTrace(size uint64) error {
	kcov.mu.Lock()
	defer kcov.mu.Unlock()

	if kcov.mode != linux.KCOV_MODE_DISABLED {
		return linuxerr.EBUSY
	}

	// To simplify all the logic around mapping, we require that the length of
	// the shared region is a multiple of the system page size.
	if (8*size)&(hostarch.PageSize-1) != 0 {
		return linuxerr.EINVAL
	}

	// We need space for at least two uint64s: one to hold the current position
	// and one for a single PC.
	if size < 2 || size > kcovAreaSizeMax {
		return linuxerr.EINVAL
	}

	kcov.size = size
	kcov.mode = linux.KCOV_MODE_INIT
	return nil
}
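
// For example, with a 4 KiB page size, size = 512 passes both checks (512*8 =
// 4096 bytes, exactly one page), while size = 100 fails the page-alignment
// check (800 bytes) and size = 1 fails the minimum-size check.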

// EnableTrace performs the KCOV_ENABLE_TRACE ioctl.
func (kcov *Kcov) EnableTrace(ctx context.Context, traceKind uint8) error {
	t := TaskFromContext(ctx)
	if t == nil {
		panic("kcovInode.EnableTrace() cannot be used outside of a task goroutine")
	}

	kcov.mu.Lock()
	defer kcov.mu.Unlock()

	// KCOV_ENABLE must be preceded by KCOV_INIT_TRACE and an mmap call.
	if kcov.mode != linux.KCOV_MODE_INIT || kcov.mappable == nil {
		return linuxerr.EINVAL
	}

	switch traceKind {
	case linux.KCOV_TRACE_PC:
		kcov.mode = linux.KCOV_MODE_TRACE_PC
	case linux.KCOV_TRACE_CMP:
		// We do not support KCOV_MODE_TRACE_CMP.
		return linuxerr.ENOTSUP
	default:
		return linuxerr.EINVAL
	}

	if kcov.owningTask != nil && kcov.owningTask != t {
		return linuxerr.EBUSY
	}

	kcov.owningTask = t
	t.SetKcov(kcov)
	t.RegisterWork(kcov)

	// Clear existing coverage data; the task expects to read only coverage
	// data from the time it is activated.
	coverage.ClearCoverageData()
	return nil
}
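
// Taken together, the expected ioctl sequence mirrors Linux kcov usage. A
// minimal sketch from the tracer's point of view (fd handling and error
// checks elided; the mapping onto this type's methods is noted inline):
//
//	ioctl(fd, KCOV_INIT_TRACE, size)       // Kcov.InitTrace
//	mmap(fd, size*8)                       // Kcov.ConfigureMMap
//	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC)  // Kcov.EnableTrace
//	// ...run the code under test...
//	ioctl(fd, KCOV_DISABLE)                // Kcov.DisableTrace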

// DisableTrace performs the KCOV_DISABLE_TRACE ioctl.
func (kcov *Kcov) DisableTrace(ctx context.Context) error {
	kcov.mu.Lock()
	defer kcov.mu.Unlock()

	t := TaskFromContext(ctx)
	if t == nil {
		panic("kcovInode.DisableTrace() cannot be used outside of a task goroutine")
	}

	if t != kcov.owningTask {
		return linuxerr.EINVAL
	}
	kcov.mode = linux.KCOV_MODE_INIT
	kcov.owningTask = nil
	if kcov.mappable != nil {
		kcov.mappable.DecRef(ctx)
		kcov.mappable = nil
	}
	return nil
}

// Clear resets the mode and clears the owning task and memory mapping for
// kcov. It is called when the fd corresponding to kcov is closed. Note that
// the mode needs to be set so that the next call to kcov.TaskWork() will exit
// early.
func (kcov *Kcov) Clear(ctx context.Context) {
	kcov.mu.Lock()
	kcov.mode = linux.KCOV_MODE_INIT
	kcov.owningTask = nil
	if kcov.mappable != nil {
		kcov.mappable.DecRef(ctx)
		kcov.mappable = nil
	}
	kcov.mu.Unlock()
}

// OnTaskExit is called when the owning task exits. It is similar to
// kcov.Clear(), except the memory mapping is not cleared, so that the same
// mapping can be used in the future if kcov is enabled again by another task.
func (kcov *Kcov) OnTaskExit() {
	kcov.mu.Lock()
	kcov.mode = linux.KCOV_MODE_INIT
	kcov.owningTask = nil
	kcov.mu.Unlock()
}

// ConfigureMMap is called by the vfs.FileDescription for this kcov instance to
// implement vfs.FileDescription.ConfigureMMap.
func (kcov *Kcov) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {
	kcov.mu.Lock()
	defer kcov.mu.Unlock()

	if kcov.mode != linux.KCOV_MODE_INIT {
		return linuxerr.EINVAL
	}

	if kcov.mappable == nil {
		// Set up the kcov area.
		opts := pgalloc.AllocOpts{
			Kind:    usage.Anonymous,
			MemCgID: pgalloc.MemoryCgroupIDFromContext(ctx),
		}
		fr, err := kcov.mfp.MemoryFile().Allocate(kcov.size*8, opts)
		if err != nil {
			return err
		}

		// Get the thread id for the mmap name.
		t := TaskFromContext(ctx)
		if t == nil {
			panic("TaskFromContext returned nil")
		}
		// For convenience, a special mappable is used here. Note that these
		// mappings will look different under /proc/[pid]/maps than they do on
		// Linux.
		kcov.mappable = mm.NewSpecialMappable(fmt.Sprintf("[kcov:%d]", t.ThreadID()), kcov.mfp, fr)
	}
	kcov.mappable.IncRef()
	opts.Mappable = kcov.mappable
	opts.MappingIdentity = kcov.mappable
	return nil
}
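
// Note the allocation is kcov.size*8 bytes: InitTrace already guaranteed that
// this product is a multiple of the page size, so e.g. size = 1024 entries
// yields an 8192-byte, two-page mapping (with 4 KiB pages).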

// kcovReadWriter implements safemem.Reader and safemem.Writer.
type kcovReadWriter struct {
	// off is the current byte offset into the kcov area.
	off uint64
	// mf is the memory file backing the kcov area.
	mf *pgalloc.MemoryFile
	// fr is the range within mf occupied by the kcov area.
	fr memmap.FileRange
}

// ReadToBlocks implements safemem.Reader.ReadToBlocks.
func (rw *kcovReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
	if dsts.IsEmpty() {
		return 0, nil
	}

	// Limit the read to the kcov range and check for overflow.
	if rw.fr.Length() <= rw.off {
		return 0, io.EOF
	}
	start := rw.fr.Start + rw.off
	end := rw.fr.Start + rw.fr.Length()
	if rend := start + dsts.NumBytes(); rend < end {
		end = rend
	}

	// Get internal mappings.
	bs, err := rw.mf.MapInternal(memmap.FileRange{start, end}, hostarch.Read)
	if err != nil {
		return 0, err
	}

	// Copy from internal mappings.
	n, err := safemem.CopySeq(dsts, bs)
	rw.off += n
	return n, err
}

// WriteFromBlocks implements safemem.Writer.WriteFromBlocks.
func (rw *kcovReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) {
	if srcs.IsEmpty() {
		return 0, nil
	}

	// Limit the write to the kcov area and check for overflow.
	if rw.fr.Length() <= rw.off {
		return 0, io.EOF
	}
	start := rw.fr.Start + rw.off
	end := rw.fr.Start + rw.fr.Length()
	if wend := start + srcs.NumBytes(); wend < end {
		end = wend
	}

	// Get internal mapping.
	bs, err := rw.mf.MapInternal(memmap.FileRange{start, end}, hostarch.Write)
	if err != nil {
		return 0, err
	}

	// Copy to internal mapping.
	n, err := safemem.CopySeq(bs, srcs)
	rw.off += n
	return n, err
}
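
// As a worked example of the clamping in both methods: with a one-page
// (4096-byte) kcov area and rw.off = 4000, a 512-byte transfer is clamped to
// the 96 bytes remaining in the area; a subsequent call with rw.off = 4096
// returns io.EOF.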

// kcovIOWriter implements io.Writer as a basic wrapper over kcovReadWriter.
type kcovIOWriter struct {
	rw *kcovReadWriter
}

// Write implements io.Writer.Write.
func (w *kcovIOWriter) Write(p []byte) (int, error) {
	bs := safemem.BlockSeqOf(safemem.BlockFromSafeSlice(p))
	n, err := safemem.WriteFullFromBlocks(w.rw, bs)
	return int(n), err
}