github.com/google/syzkaller@v0.0.0-20251211124644-a066d2bc4b02/syz-verifier/rpcserver.go (about)

     1  // Copyright 2021 syzkaller project authors. All rights reserved.
     2  // Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
     3  
     4  // TODO: switch syz-verifier to use syz-fuzzer.
     5  
     6  //go:build ignore
     7  
     8  package main
     9  
    10  import (
    11  	"errors"
    12  	"net"
    13  	"os"
    14  	"sync"
    15  
    16  	"github.com/google/syzkaller/pkg/log"
    17  	"github.com/google/syzkaller/pkg/rpctype"
    18  )
    19  
// RPCServer is a wrapper around the rpc.Server. It communicates with Runners,
// generates programs and sends complete Results for verification.
type RPCServer struct {
	vrf  *Verifier
	port int

	// mu protects all the fields below.
	mu sync.Mutex
	// notChecked counts the pools that have not yet reported their
	// UnsupportedCalls result (see UpdateUnsupported).
	notChecked int
	// vmTasksInProgress stores, per (pool, VM) key (see vmTasksKey), the set
	// of task IDs currently assigned to that VM.
	vmTasksInProgress map[int]map[int64]bool
}
    33  
    34  func startRPCServer(vrf *Verifier) (*RPCServer, error) {
    35  	srv := &RPCServer{
    36  		vrf:        vrf,
    37  		notChecked: len(vrf.pools),
    38  	}
    39  
    40  	s, err := rpctype.NewRPCServer(vrf.addr, "Verifier", srv)
    41  	if err != nil {
    42  		return nil, err
    43  	}
    44  
    45  	log.Logf(0, "serving rpc on tcp://%v", s.Addr())
    46  	srv.port = s.Addr().(*net.TCPAddr).Port
    47  
    48  	go s.Serve()
    49  	return srv, nil
    50  }
    51  
    52  // Connect notifies the RPCServer that a new Runner was started.
    53  func (srv *RPCServer) Connect(a *rpctype.RunnerConnectArgs, r *rpctype.RunnerConnectRes) error {
    54  	r.CheckUnsupportedCalls = !srv.vrf.pools[a.Pool].checked
    55  	return nil
    56  }
    57  
    58  // UpdateUnsupported communicates to the server the list of system calls not
    59  // supported by the kernel corresponding to this pool and updates the list of
    60  // enabled system calls. This function is called once for each kernel.
    61  // When all kernels have reported the list of unsupported system calls, the
    62  // choice table will be created using only the system calls supported by all
    63  // kernels.
    64  func (srv *RPCServer) UpdateUnsupported(a *rpctype.UpdateUnsupportedArgs, r *int) error {
    65  	srv.mu.Lock()
    66  	defer srv.mu.Unlock()
    67  
    68  	if srv.vrf.pools[a.Pool].checked {
    69  		return nil
    70  	}
    71  	srv.vrf.pools[a.Pool].checked = true
    72  	vrf := srv.vrf
    73  
    74  	for _, unsupported := range a.UnsupportedCalls {
    75  		if c := vrf.target.Syscalls[unsupported.ID]; vrf.calls[c] {
    76  			vrf.reasons[c] = unsupported.Reason
    77  		}
    78  	}
    79  
    80  	srv.notChecked--
    81  	if srv.notChecked == 0 {
    82  		vrf.finalizeCallSet(os.Stdout)
    83  
    84  		vrf.stats.SetSyscallMask(vrf.calls)
    85  		vrf.SetPrintStatAtSIGINT()
    86  
    87  		vrf.choiceTable = vrf.target.BuildChoiceTable(nil, vrf.calls)
    88  		vrf.progGeneratorInit.Done()
    89  	}
    90  	return nil
    91  }
    92  
    93  // NextExchange is called when a Runner requests a new program to execute and,
    94  // potentially, wants to send a new Result to the RPCServer.
    95  func (srv *RPCServer) NextExchange(a *rpctype.NextExchangeArgs, r *rpctype.NextExchangeRes) error {
    96  	if a.Info.Calls != nil {
    97  		srv.stopWaitResult(a.Pool, a.VM, a.ExecTaskID)
    98  		srv.vrf.PutExecResult(&ExecResult{
    99  			Pool:       a.Pool,
   100  			Hanged:     a.Hanged,
   101  			Info:       a.Info,
   102  			ExecTaskID: a.ExecTaskID,
   103  		})
   104  	}
   105  
   106  	// TODO: NewEnvironment is the currently hardcoded logic. Relax it.
   107  	task := srv.vrf.GetRunnerTask(a.Pool, NewEnvironment)
   108  	srv.startWaitResult(a.Pool, a.VM, task.ID)
   109  	r.ExecTask = *task
   110  
   111  	return nil
   112  }
   113  
   114  func vmTasksKey(poolID, vmID int) int {
   115  	return poolID*1000 + vmID
   116  }
   117  
   118  func (srv *RPCServer) startWaitResult(poolID, vmID int, taskID int64) {
   119  	srv.mu.Lock()
   120  	defer srv.mu.Unlock()
   121  
   122  	if srv.vmTasksInProgress == nil {
   123  		srv.vmTasksInProgress = make(map[int]map[int64]bool)
   124  	}
   125  
   126  	if srv.vmTasksInProgress[vmTasksKey(poolID, vmID)] == nil {
   127  		srv.vmTasksInProgress[vmTasksKey(poolID, vmID)] =
   128  			make(map[int64]bool)
   129  	}
   130  
   131  	srv.vmTasksInProgress[vmTasksKey(poolID, vmID)][taskID] = true
   132  }
   133  
   134  func (srv *RPCServer) stopWaitResult(poolID, vmID int, taskID int64) {
   135  	srv.mu.Lock()
   136  	defer srv.mu.Unlock()
   137  	delete(srv.vmTasksInProgress[vmTasksKey(poolID, vmID)], taskID)
   138  }
   139  
   140  // cleanup is called when a vm.Instance crashes.
   141  func (srv *RPCServer) cleanup(poolID, vmID int) {
   142  	srv.mu.Lock()
   143  	defer srv.mu.Unlock()
   144  
   145  	// Signal error for every VM related task and let upper level logic to process it.
   146  	for taskID := range srv.vmTasksInProgress[vmTasksKey(poolID, vmID)] {
   147  		srv.vrf.PutExecResult(&ExecResult{
   148  			Pool:       poolID,
   149  			ExecTaskID: taskID,
   150  			Crashed:    true,
   151  			Error:      errors.New("VM crashed during the task execution"),
   152  		})
   153  	}
   154  	delete(srv.vmTasksInProgress, vmTasksKey(poolID, vmID))
   155  }