github.com/google/syzkaller@v0.0.0-20240517125934-c0f1611a36d6/syz-manager/rpc.go

// Copyright 2018 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

package main

import (
	"bytes"
	"fmt"
	"net"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/google/syzkaller/pkg/cover"
	"github.com/google/syzkaller/pkg/flatrpc"
	"github.com/google/syzkaller/pkg/fuzzer/queue"
	"github.com/google/syzkaller/pkg/ipc"
	"github.com/google/syzkaller/pkg/log"
	"github.com/google/syzkaller/pkg/mgrconfig"
	"github.com/google/syzkaller/pkg/rpctype"
	"github.com/google/syzkaller/pkg/signal"
	"github.com/google/syzkaller/pkg/stats"
	"github.com/google/syzkaller/pkg/vminfo"
	"github.com/google/syzkaller/prog"
)

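// RPCServer handles the RPC traffic between the manager and the syz-fuzzer/executor
// processes running in VMs: machine check results, test program requests/results,
// and max signal exchange.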
type RPCServer struct {
	mgr     RPCManagerView
	cfg     *mgrconfig.Config
	target  *prog.Target
	server  *rpctype.RPCServer
	checker *vminfo.Checker
	port    int

	infoDone         bool
	checkDone        atomic.Bool
	checkFailures    int
	baseSource       *queue.DynamicSourceCtl
	enabledFeatures  flatrpc.Feature
	setupFeatures    flatrpc.Feature
	modules          []cover.KernelModule
	canonicalModules *cover.Canonicalizer
	execCoverFilter  map[uint32]uint32
	coverFilter      map[uint32]uint32

	mu         sync.Mutex
	runners    sync.Map // Instead of map[string]*Runner.
	execSource queue.Source

	statExecs                 *stats.Val
	statExecRetries           *stats.Val
	statExecutorRestarts      *stats.Val
	statExecBufferTooSmall    *stats.Val
	statVMRestarts            *stats.Val
	statExchangeCalls         *stats.Val
	statExchangeProgs         *stats.Val
	statExchangeServerLatency *stats.Val
	statExchangeClientLatency *stats.Val
	statCoverFiltered         *stats.Val
}

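// Runner tracks the state of a single VM instance connected to the RPC server.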
type Runner struct {
	name        string
	injectLog   chan<- []byte
	injectStop  chan bool
	stopFuzzing atomic.Bool

	machineInfo []byte
	instModules *cover.CanonicalizerInstance

	// The mutex protects newMaxSignal, dropMaxSignal, and requests.
	mu            sync.Mutex
	newMaxSignal  signal.Signal
	dropMaxSignal signal.Signal
	nextRequestID int64
	requests      map[int64]Request
}

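// Request is the server-side bookkeeping for a single in-flight program execution.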
type Request struct {
	req        *queue.Request
	serialized []byte
	try        int
	procID     int
}

type BugFrames struct {
	memoryLeaks []string
	dataRaces   []string
}

// RPCManagerView restricts the interface between RPCServer and Manager.
type RPCManagerView interface {
	currentBugFrames() BugFrames
	machineChecked(features flatrpc.Feature, enabledSyscalls map[*prog.Syscall]bool,
		opts ipc.ExecOpts) queue.Source
}

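// startRPCServer creates the RPC server that the fuzzers connect to
// and starts serving it in a background goroutine.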
func startRPCServer(mgr *Manager) (*RPCServer, error) {
	checker := vminfo.New(mgr.cfg)
	baseSource := queue.DynamicSource(checker)
	serv := &RPCServer{
		mgr:        mgr,
		cfg:        mgr.cfg,
		target:     mgr.target,
		checker:    checker,
		baseSource: baseSource,
		execSource: queue.Retry(baseSource),
		statExecs:  mgr.statExecs,
		statExecRetries: stats.Create("exec retries",
			"Number of times a test program was restarted because the first run failed",
			stats.Rate{}, stats.Graph("executor")),
		statExecutorRestarts: stats.Create("executor restarts",
			"Number of times executor process was restarted", stats.Rate{}, stats.Graph("executor")),
		statExecBufferTooSmall: stats.Create("buffer too small",
			"Program serialization overflowed exec buffer", stats.NoGraph),
		statVMRestarts: stats.Create("vm restarts", "Total number of VM starts",
			stats.Rate{}, stats.NoGraph),
		statExchangeCalls: stats.Create("exchange calls", "Number of RPC Exchange calls",
			stats.Rate{}),
		statExchangeProgs: stats.Create("exchange progs", "Test programs exchanged per RPC call",
			stats.Distribution{}),
		statExchangeServerLatency: stats.Create("exchange manager latency",
			"Manager RPC Exchange call latency (us)", stats.Distribution{}),
		statExchangeClientLatency: stats.Create("exchange fuzzer latency",
			"End-to-end fuzzer RPC Exchange call latency (us)", stats.Distribution{}),
		statCoverFiltered: stats.Create("filtered coverage", "", stats.NoGraph),
	}
	s, err := rpctype.NewRPCServer(mgr.cfg.RPC, "Manager", serv)
	if err != nil {
		return nil, err
	}
	baseSource.Store(serv.checker)

	log.Logf(0, "serving rpc on tcp://%v", s.Addr())
	serv.port = s.Addr().(*net.TCPAddr).Port
	serv.server = s
	go s.Serve()
	return serv, nil
}

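// Connect is called by syz-fuzzer when it starts on a VM instance.
// It returns the current bug frames, the files/globs the fuzzer needs to read
// for the machine check, and the set of features the fuzzer should set up.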
func (serv *RPCServer) Connect(a *rpctype.ConnectArgs, r *rpctype.ConnectRes) error {
	log.Logf(1, "fuzzer %v connected", a.Name)
	checkRevisions(a, serv.cfg.Target)
	serv.statVMRestarts.Add(1)

	bugFrames := serv.mgr.currentBugFrames()
	r.MemoryLeakFrames = bugFrames.memoryLeaks
	r.DataRaceFrames = bugFrames.dataRaces

	serv.mu.Lock()
	defer serv.mu.Unlock()
	r.ReadFiles = serv.checker.RequiredFiles()
	if serv.checkDone.Load() {
		r.Features = serv.setupFeatures
	} else {
		r.ReadFiles = append(r.ReadFiles, serv.checker.CheckFiles()...)
		r.ReadGlobs = serv.target.RequiredGlobs()
		r.Features = flatrpc.AllFeatures
	}
	return nil
}

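// checkRevisions aborts the manager if the fuzzer/executor binaries were built
// for a different arch, git revision, or syscall description revision than the manager.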
func checkRevisions(a *rpctype.ConnectArgs, target *prog.Target) {
	if target.Arch != a.ExecutorArch {
		log.Fatalf("mismatching target/executor arches: %v vs %v", target.Arch, a.ExecutorArch)
	}
	if prog.GitRevision != a.GitRevision {
		log.Fatalf("mismatching manager/fuzzer git revisions: %v vs %v",
			prog.GitRevision, a.GitRevision)
	}
	if prog.GitRevision != a.ExecutorGitRevision {
		log.Fatalf("mismatching manager/executor git revisions: %v vs %v",
			prog.GitRevision, a.ExecutorGitRevision)
	}
	if target.Revision != a.SyzRevision {
		log.Fatalf("mismatching manager/fuzzer system call descriptions: %v vs %v",
			target.Revision, a.SyzRevision)
	}
	if target.Revision != a.ExecutorSyzRevision {
		log.Fatalf("mismatching manager/executor system call descriptions: %v vs %v",
			target.Revision, a.ExecutorSyzRevision)
	}
}

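// Check is called by syz-fuzzer with the contents of the files/globs requested in Connect.
// On the first successful call it kicks off the feature/syscall check and sets up coverage
// filtering; for every call it records the instance's machine info and kernel modules.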
func (serv *RPCServer) Check(a *rpctype.CheckArgs, r *rpctype.CheckRes) error {
	serv.mu.Lock()
	defer serv.mu.Unlock()

	modules, machineInfo, err := serv.checker.MachineInfo(a.Files)
	if err != nil {
		log.Logf(0, "parsing of machine info failed: %v", err)
		if a.Error == "" {
			a.Error = err.Error()
		}
	}

	if a.Error != "" {
		log.Logf(0, "machine check failed: %v", a.Error)
		serv.checkFailures++
		if serv.checkFailures == 10 {
			log.Fatalf("machine check failing")
		}
		return fmt.Errorf("machine check failed: %v", a.Error)
	}

	if !serv.infoDone {
		serv.infoDone = true

		// Now execute check programs.
		go serv.runCheck(a.Files, a.Features)

		serv.modules = modules
		serv.target.UpdateGlobs(a.Globs)
		serv.canonicalModules = cover.NewCanonicalizer(modules, serv.cfg.Cover)
		var err error
		serv.execCoverFilter, serv.coverFilter, err = createCoverageFilter(serv.cfg, modules)
		if err != nil {
			log.Fatalf("failed to init coverage filter: %v", err)
		}
	}

	runner := serv.findRunner(a.Name)
	if runner == nil {
		// There may be a parallel shutdownInstance() call that removes the runner.
		return fmt.Errorf("unknown runner %s", a.Name)
	}

	runner.mu.Lock()
	defer runner.mu.Unlock()
	if runner.machineInfo != nil {
		return fmt.Errorf("duplicate connection from %s", a.Name)
	}
	runner.machineInfo = machineInfo
	runner.instModules = serv.canonicalModules.NewInstance(modules)
	instCoverFilter := runner.instModules.DecanonicalizeFilter(serv.execCoverFilter)
	r.CoverFilterBitmap = createCoverageBitmap(serv.cfg.SysTarget, instCoverFilter)
	return nil
}

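// runCheck runs the machine check asynchronously and marks it as done;
// any check error is fatal for the manager.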
func (serv *RPCServer) runCheck(checkFilesInfo []*flatrpc.FileInfo, checkFeatureInfo []*flatrpc.FeatureInfo) {
	if err := serv.finishCheck(checkFilesInfo, checkFeatureInfo); err != nil {
		log.Fatalf("check failed: %v", err)
	}
	serv.checkDone.Store(true)
}

func (serv *RPCServer) finishCheck(checkFilesInfo []*flatrpc.FileInfo, checkFeatureInfo []*flatrpc.FeatureInfo) error {
	// Note: need to print disabled syscalls before failing due to an error.
	// This helps to debug "all system calls are disabled".

	enabledCalls, disabledCalls, features, checkErr := serv.checker.Run(checkFilesInfo, checkFeatureInfo)
	enabledCalls, transitivelyDisabled := serv.target.TransitivelyEnabledCalls(enabledCalls)
	buf := new(bytes.Buffer)
	if len(serv.cfg.EnabledSyscalls) != 0 || log.V(1) {
		if len(disabledCalls) != 0 {
			var lines []string
			for call, reason := range disabledCalls {
				lines = append(lines, fmt.Sprintf("%-44v: %v\n", call.Name, reason))
			}
			sort.Strings(lines)
			fmt.Fprintf(buf, "disabled the following syscalls:\n%s\n", strings.Join(lines, ""))
		}
		if len(transitivelyDisabled) != 0 {
			var lines []string
			for call, reason := range transitivelyDisabled {
				lines = append(lines, fmt.Sprintf("%-44v: %v\n", call.Name, reason))
			}
			sort.Strings(lines)
			fmt.Fprintf(buf, "transitively disabled the following syscalls"+
				" (missing resource [creating syscalls]):\n%s\n",
				strings.Join(lines, ""))
		}
	}
	hasFileErrors := false
	for _, file := range checkFilesInfo {
		if file.Error == "" {
			continue
		}
		if !hasFileErrors {
			fmt.Fprintf(buf, "failed to read the following files in the VM:\n")
		}
		fmt.Fprintf(buf, "%-44v: %v\n", file.Name, file.Error)
		hasFileErrors = true
	}
	if hasFileErrors {
		fmt.Fprintf(buf, "\n")
	}
	var lines []string
	lines = append(lines, fmt.Sprintf("%-24v: %v/%v\n", "syscalls",
		len(enabledCalls), len(serv.cfg.Target.Syscalls)))
	for feat, info := range features {
		lines = append(lines, fmt.Sprintf("%-24v: %v\n",
			flatrpc.EnumNamesFeature[feat], info.Reason))
	}
	sort.Strings(lines)
	buf.WriteString(strings.Join(lines, ""))
	fmt.Fprintf(buf, "\n")
	log.Logf(0, "machine check:\n%s", buf.Bytes())
	if checkErr != nil {
		return checkErr
	}
	if len(enabledCalls) == 0 {
		return fmt.Errorf("all system calls are disabled")
	}
	serv.enabledFeatures = features.Enabled()
	serv.setupFeatures = features.NeedSetup()
	newSource := serv.mgr.machineChecked(serv.enabledFeatures, enabledCalls, serv.execOpts())
	serv.baseSource.Store(newSource)
	return nil
}

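// StartExecuting is a notification from syz-fuzzer that it has started executing
// the program with the given ID, so the program is injected into the instance's log
// before the VM potentially crashes on it.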
func (serv *RPCServer) StartExecuting(a *rpctype.ExecutingRequest, r *int) error {
	serv.statExecs.Add(1)
	if a.Try != 0 {
		serv.statExecRetries.Add(1)
	}
	runner := serv.findRunner(a.Name)
	if runner == nil {
		return nil
	}
	runner.mu.Lock()
	req, ok := runner.requests[a.ID]
	if !ok {
		runner.mu.Unlock()
		return nil
	}
	// RPC handlers are invoked in separate goroutines, so start-executing notifications
	// can outrun both each other and the completion notification.
	if req.try < a.Try {
		req.try = a.Try
		req.procID = a.ProcID
	}
	runner.requests[a.ID] = req
	runner.mu.Unlock()
	runner.logProgram(a.ProcID, req.serialized)
	return nil
}

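// ExchangeInfo is the main fuzzing RPC: the fuzzer posts results of completed programs
// and stats deltas, and receives new programs to execute together with max signal updates.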
func (serv *RPCServer) ExchangeInfo(a *rpctype.ExchangeInfoRequest, r *rpctype.ExchangeInfoReply) error {
	start := time.Now()
	runner := serv.findRunner(a.Name)
	if runner == nil {
		return nil
	}
	// First query new inputs and only then post results.
	// This should foster a more even distribution of executions
	// across all VMs.
	for len(r.Requests) < a.NeedProgs {
		inp := serv.execSource.Next()
		if inp == nil {
			// It's unlikely that subsequent Next() calls will yield something.
			break
		}
		if err := validateRequest(inp); err != nil {
			panic(fmt.Sprintf("invalid request: %v, req: %#v", err, inp))
		}
		if req, ok := serv.newRequest(runner, inp); ok {
			r.Requests = append(r.Requests, req)
			if inp.Risky() {
				// We give crashed inputs only one more chance, so if we resend many of them at once,
				// we'll never figure out the actual problematic input.
				break
			}
		} else {
			// It's bad if we systematically fail to serialize programs,
			// but so far we have no better handling than counting such cases.
			// This error is observed a lot on the seeded syz_mount_image calls.
			serv.statExecBufferTooSmall.Add(1)
			inp.Done(&queue.Result{Status: queue.ExecFailure})
		}
	}

	for _, result := range a.Results {
		serv.doneRequest(runner, result)
	}

	stats.Import(a.StatsDelta)

	runner.mu.Lock()
	// Transfer new max signal in portions (up to transferMaxSignal entries per exchange).
	const transferMaxSignal = 500000
	newSignal := runner.newMaxSignal.Split(transferMaxSignal)
	dropSignal := runner.dropMaxSignal.Split(transferMaxSignal)
	runner.mu.Unlock()

	r.NewMaxSignal = runner.instModules.Decanonicalize(newSignal.ToRaw())
	r.DropMaxSignal = runner.instModules.Decanonicalize(dropSignal.ToRaw())

	log.Logf(2, "exchange with %s: %d done, %d new requests, %d new max signal, %d drop signal",
		a.Name, len(a.Results), len(r.Requests), len(r.NewMaxSignal), len(r.DropMaxSignal))

	serv.statExchangeCalls.Add(1)
	serv.statExchangeProgs.Add(a.NeedProgs)
	serv.statExchangeClientLatency.Add(int(a.Latency.Microseconds()))
	serv.statExchangeServerLatency.Add(int(time.Since(start).Microseconds()))
	return nil
}

func validateRequest(req *queue.Request) error {
	err := req.Validate()
	if err != nil {
		return err
	}
	if req.BinaryFile != "" {
		// Currently it should only be done in tools/syz-runtest.
		return fmt.Errorf("binary file execution is not supported")
	}
	return nil
}

func (serv *RPCServer) findRunner(name string) *Runner {
	if val, _ := serv.runners.Load(name); val != nil {
		runner := val.(*Runner)
		if runner.stopFuzzing.Load() {
			return nil
		}
		return runner
	}
	// There might be a parallel shutdownInstance().
	// Ignore requests then.
	return nil
}

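// createInstance registers a new VM instance (and the channel used to inject executed
// programs into its log) with the RPC server; it panics on a duplicate instance name.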
func (serv *RPCServer) createInstance(name string, maxSignal signal.Signal, injectLog chan<- []byte) {
	runner := &Runner{
		name:         name,
		requests:     make(map[int64]Request),
		newMaxSignal: maxSignal,
		injectLog:    injectLog,
		injectStop:   make(chan bool),
	}
	if _, loaded := serv.runners.LoadOrStore(name, runner); loaded {
		panic(fmt.Sprintf("duplicate instance %s", name))
	}
}

// stopFuzzing prevents further exchange of fuzzing requests with the instance.
// To make RPCServer fully forget an instance, shutdownInstance() must be called.
func (serv *RPCServer) stopFuzzing(name string) {
	runner := serv.findRunner(name)
	if runner == nil {
		return
	}
	runner.stopFuzzing.Store(true)
}

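// shutdownInstance makes the RPC server forget about the instance: all pending requests
// are completed as Crashed/Restarted and the collected machine info is returned.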
func (serv *RPCServer) shutdownInstance(name string, crashed bool) []byte {
	runnerPtr, _ := serv.runners.LoadAndDelete(name)
	runner := runnerPtr.(*Runner)
	runner.mu.Lock()
	if runner.requests == nil {
		// We are supposed to invoke this code only once.
		panic("Runner.requests is already nil")
	}
	oldRequests := runner.requests
	runner.requests = nil
	runner.mu.Unlock()

	close(runner.injectStop)

	serv.mu.Lock()
	defer serv.mu.Unlock()
	for _, req := range oldRequests {
		if crashed && req.try >= 0 {
			req.req.Done(&queue.Result{Status: queue.Crashed})
		} else {
			req.req.Done(&queue.Result{Status: queue.Restarted})
		}
	}
	return runner.machineInfo
}

func (serv *RPCServer) distributeSignalDelta(plus, minus signal.Signal) {
	serv.runners.Range(func(key, value any) bool {
		runner := value.(*Runner)
		runner.mu.Lock()
		defer runner.mu.Unlock()
		runner.newMaxSignal.Merge(plus)
		runner.dropMaxSignal.Merge(minus)
		return true
	})
}

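// updateCoverFilter counts how many of the newly discovered coverage PCs fall into
// the configured coverage filter and updates the corresponding stat.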
func (serv *RPCServer) updateCoverFilter(newCover []uint32) {
	if len(newCover) == 0 || serv.coverFilter == nil {
		return
	}
	rg, _ := getReportGenerator(serv.cfg, serv.modules)
	if rg == nil {
		return
	}
	filtered := 0
	for _, pc := range newCover {
		if serv.coverFilter[uint32(rg.RestorePC(pc))] != 0 {
			filtered++
		}
	}
	serv.statCoverFiltered.Add(filtered)
}

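// doneRequest completes a single program execution: it canonicalizes the coverage/signal
// w.r.t. the instance's kernel modules and passes the result to the request's creator.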
func (serv *RPCServer) doneRequest(runner *Runner, resp rpctype.ExecutionResult) {
	info := &resp.Info
	if info.Freshness == 0 {
		serv.statExecutorRestarts.Add(1)
	}
	runner.mu.Lock()
	req, ok := runner.requests[resp.ID]
	if ok {
		delete(runner.requests, resp.ID)
	}
	runner.mu.Unlock()
	if !ok {
		// There may be a concurrent shutdownInstance() call.
		return
	}
	// RPC handlers are invoked in separate goroutines, so log the program here
	// if the completion notification outran the start-executing notification.
	if req.try < resp.Try {
		runner.logProgram(resp.ProcID, req.serialized)
	}
	for i := 0; i < len(info.Calls); i++ {
		call := &info.Calls[i]
		call.Cover = runner.instModules.Canonicalize(call.Cover)
		call.Signal = runner.instModules.Canonicalize(call.Signal)
	}
	info.Extra.Cover = runner.instModules.Canonicalize(info.Extra.Cover)
	info.Extra.Signal = runner.instModules.Canonicalize(info.Extra.Signal)

	result := &queue.Result{
		Status: queue.Success,
		Info:   info,
		Output: resp.Output,
	}
	if resp.Error != "" {
		result.Status = queue.ExecFailure
		result.Err = fmt.Errorf("%s", resp.Error)
	} else if !serv.cfg.Cover && req.req.ExecOpts.ExecFlags&flatrpc.ExecFlagCollectSignal > 0 {
		// Coverage collection is disabled, but signal was requested => use a substitute signal.
		addFallbackSignal(req.req.Prog, info)
	}
	req.req.Done(result)
}

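// newRequest converts a queue.Request into an rpctype.ExecutionRequest for the given runner.
// It returns false if the program cannot be serialized into the exec buffer.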
func (serv *RPCServer) newRequest(runner *Runner, req *queue.Request) (rpctype.ExecutionRequest, bool) {
	progData, err := req.Prog.SerializeForExec()
	if err != nil {
		return rpctype.ExecutionRequest{}, false
	}

	// logProgram() may race with Done(), so let's serialize the program right now.
	serialized := req.Prog.Serialize()

	var signalFilter signal.Signal
	if req.SignalFilter != nil {
		newRawSignal := runner.instModules.Decanonicalize(req.SignalFilter.ToRaw())
		// We don't care about specific priorities here.
		signalFilter = signal.FromRaw(newRawSignal, 0)
	}
	runner.mu.Lock()
	runner.nextRequestID++
	id := runner.nextRequestID
	if runner.requests != nil {
		runner.requests[id] = Request{
			req:        req,
			try:        -1,
			serialized: serialized,
		}
	}
	runner.mu.Unlock()
	return rpctype.ExecutionRequest{
		ID:               id,
		ProgData:         progData,
		ExecOpts:         req.ExecOpts,
		NewSignal:        !req.ReturnAllSignal,
		SignalFilter:     signalFilter,
		SignalFilterCall: req.SignalFilterCall,
		ResetState:       serv.cfg.Experimental.ResetAccState,
		ReturnError:      req.ReturnError,
		ReturnOutput:     req.ReturnOutput,
	}, true
}

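// execOpts computes the executor env/exec flags based on the config and the detected features.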
func (serv *RPCServer) execOpts() ipc.ExecOpts {
	env := ipc.FeaturesToFlags(serv.enabledFeatures, nil)
	if *flagDebug {
		env |= flatrpc.ExecEnvDebug
	}
	if serv.cfg.Cover {
		env |= flatrpc.ExecEnvSignal
	}
	sandbox, err := ipc.SandboxToFlags(serv.cfg.Sandbox)
	if err != nil {
		panic(fmt.Sprintf("failed to parse sandbox: %v", err))
	}
	env |= sandbox

	exec := flatrpc.ExecFlagThreaded
	if !serv.cfg.RawCover {
		exec |= flatrpc.ExecFlagDedupCover
	}
	if serv.cfg.HasCovFilter() {
		exec |= flatrpc.ExecFlagCoverFilter
	}
	return ipc.ExecOpts{
		EnvFlags:   env,
		ExecFlags:  exec,
		SandboxArg: serv.cfg.SandboxArg,
	}
}

func (runner *Runner) logProgram(procID int, serialized []byte) {
	buf := new(bytes.Buffer)
	fmt.Fprintf(buf, "executing program %v:\n%s\n", procID, serialized)
	select {
	case runner.injectLog <- buf.Bytes():
	case <-runner.injectStop:
	}
}

// addFallbackSignal computes a simple fallback signal for cases where we don't have real coverage signal.
// We use the syscall number or-ed with the returned errno value as the signal.
// At least this gives us all combinations of syscall+errno.
func addFallbackSignal(p *prog.Prog, info *ipc.ProgInfo) {
	callInfos := make([]prog.CallInfo, len(info.Calls))
	for i, inf := range info.Calls {
		if inf.Flags&ipc.CallExecuted != 0 {
			callInfos[i].Flags |= prog.CallExecuted
		}
		if inf.Flags&ipc.CallFinished != 0 {
			callInfos[i].Flags |= prog.CallFinished
		}
		if inf.Flags&ipc.CallBlocked != 0 {
			callInfos[i].Flags |= prog.CallBlocked
		}
		callInfos[i].Errno = inf.Errno
	}
	p.FallbackSignal(callInfos)
	for i, inf := range callInfos {
		info.Calls[i].Signal = inf.Signal
	}
}