github.com/google/syzkaller@v0.0.0-20240517125934-c0f1611a36d6/pkg/csource/csource.go (about)

     1  // Copyright 2015 syzkaller project authors. All rights reserved.
     2  // Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
     3  
     4  // Package csource generates [almost] equivalent C programs from syzkaller programs.
     5  //
     6  // Outline of the process:
     7  //   - inputs to the generation are the program and options
     8  //   - options control multiple aspects of the resulting C program,
     9  //     like if we want a multi-threaded program or a single-threaded,
    10  //     what type of sandbox we want to use, if we want to setup net devices or not, etc
    11  //   - we use actual executor sources as the base
    12  //   - gen.go takes all executor/common*.h headers and bundles them into generated.go
    13  //   - during generation we tear executor headers apart and take only the bits
    14  //     we need for the current program/options, this is done by running C preprocessor
    15  //     with particular set of defines so that the preprocessor removes unneeded
    16  //     #ifdef SYZ_FOO sections
    17  //   - then we generate actual syscall calls with the given arguments
    18  //     based on the binary "encodingexec" representation of the program
    19  //     (the same representation executor uses for interpretation)
    20  //   - then we glue it all together
    21  //   - as the last step we run some text post-processing on the resulting source code:
    22  //     remove debug calls, replace exitf/fail with exit, hoist/sort/dedup includes,
    23  //     remove duplicate empty lines, etc
    24  package csource
    25  
    26  import (
    27  	"bytes"
    28  	"fmt"
    29  	"math/bits"
    30  	"regexp"
    31  	"sort"
    32  	"strconv"
    33  	"strings"
    34  	"time"
    35  
    36  	"github.com/google/syzkaller/prog"
    37  	"github.com/google/syzkaller/sys/targets"
    38  )
    39  
    40  // Write generates C source for program p based on the provided options opt.
    41  func Write(p *prog.Prog, opts Options) ([]byte, error) {
    42  	if err := opts.Check(p.Target.OS); err != nil {
    43  		return nil, fmt.Errorf("csource: invalid opts: %w", err)
    44  	}
    45  	ctx := &context{
    46  		p:         p,
    47  		opts:      opts,
    48  		target:    p.Target,
    49  		sysTarget: targets.Get(p.Target.OS, p.Target.Arch),
    50  		calls:     make(map[string]uint64),
    51  	}
    52  	return ctx.generateSource()
    53  }
    54  
// context carries all the state needed to generate one C program.
type context struct {
	p         *prog.Prog        // program being converted (lazily cloned if calls get filtered out)
	opts      Options           // generation options (threading, sandbox, trace, etc.)
	target    *prog.Target      // prog-level target description
	sysTarget *targets.Target   // OS/arch-level target description
	calls     map[string]uint64 // CallName -> NR
}
    62  
    63  func generateSandboxFunctionSignature(sandboxName string, sandboxArg int) string {
    64  	if sandboxName == "" {
    65  		return "loop();"
    66  	}
    67  
    68  	arguments := "();"
    69  	if sandboxName == "android" {
    70  		arguments = "(" + strconv.Itoa(sandboxArg) + ");"
    71  	}
    72  	return "do_sandbox_" + sandboxName + arguments
    73  }
    74  
// generateSource produces the complete C source for ctx.p: it generates the
// per-call code snippets, computes the replacement map for the executor
// template placeholders (PROCS, SYSCALLS, ...), merges everything with the
// common executor headers via createCommonHeader and post-processes the text.
func (ctx *context) generateSource() ([]byte, error) {
	ctx.filterCalls()
	calls, vars, err := ctx.generateProgCalls(ctx.p, ctx.opts.Trace)
	if err != nil {
		return nil, err
	}

	// The data-mmap program is generated separately and substituted as MMAP_DATA.
	mmapProg := ctx.p.Target.DataMmapProg()
	mmapCalls, _, err := ctx.generateProgCalls(mmapProg, false)
	if err != nil {
		return nil, err
	}

	// Record syscall numbers for every used call and its pseudo-syscall
	// dependencies, so that generateSyscallDefines can emit defines for them.
	for _, c := range append(mmapProg.Calls, ctx.p.Calls...) {
		ctx.calls[c.Meta.CallName] = c.Meta.NR
		for _, dep := range ctx.sysTarget.PseudoSyscallDeps[c.Meta.CallName] {
			depCall := ctx.target.SyscallMap[dep]
			if depCall == nil {
				panic(dep + " is specified in PseudoSyscallDeps, but not present")
			}
			ctx.calls[depCall.CallName] = depCall.NR
		}
	}

	// Render the r[] array of result values, if the program uses any.
	varsBuf := new(bytes.Buffer)
	if len(vars) != 0 {
		fmt.Fprintf(varsBuf, "uint64 r[%v] = {", len(vars))
		for i, v := range vars {
			if i != 0 {
				fmt.Fprintf(varsBuf, ", ")
			}
			fmt.Fprintf(varsBuf, "0x%x", v)
		}
		fmt.Fprintf(varsBuf, "};\n")
	}

	sandboxFunc := generateSandboxFunctionSignature(ctx.opts.Sandbox, ctx.opts.SandboxArg)
	replacements := map[string]string{
		"PROCS":           fmt.Sprint(ctx.opts.Procs),
		"REPEAT_TIMES":    fmt.Sprint(ctx.opts.RepeatTimes),
		"NUM_CALLS":       fmt.Sprint(len(ctx.p.Calls)),
		"MMAP_DATA":       strings.Join(mmapCalls, ""),
		"SYSCALL_DEFINES": ctx.generateSyscallDefines(),
		"SANDBOX_FUNC":    sandboxFunc,
		"RESULTS":         varsBuf.String(),
		"SYSCALLS":        ctx.generateSyscalls(calls, len(vars) != 0),
	}
	if !ctx.opts.Threaded && !ctx.opts.Repeat && ctx.opts.Sandbox == "" {
		// This inlines syscalls right into main for the simplest case.
		replacements["SANDBOX_FUNC"] = replacements["SYSCALLS"]
		replacements["SYSCALLS"] = "unused"
	}
	// Program/syscall timeouts come from the target, scaled by the slowdown factor.
	timeouts := ctx.sysTarget.Timeouts(ctx.opts.Slowdown)
	replacements["PROGRAM_TIMEOUT_MS"] = fmt.Sprint(int(timeouts.Program / time.Millisecond))
	timeoutExpr := fmt.Sprint(int(timeouts.Syscall / time.Millisecond))
	replacements["BASE_CALL_TIMEOUT_MS"] = timeoutExpr
	// Per-call timeout attributes are added on top of the base timeout as a
	// C expression conditional on the call index.
	for i, call := range ctx.p.Calls {
		if timeout := call.Meta.Attrs.Timeout; timeout != 0 {
			timeoutExpr += fmt.Sprintf(" + (call == %v ? %v : 0)", i, timeout*uint64(timeouts.Scale))
		}
	}
	replacements["CALL_TIMEOUT_MS"] = timeoutExpr
	if ctx.p.RequiredFeatures().Async {
		// Build the C condition that tells which call indices run asynchronously.
		conditions := []string{}
		for idx, call := range ctx.p.Calls {
			if !call.Props.Async {
				continue
			}
			conditions = append(conditions, fmt.Sprintf("call == %v", idx))
		}
		replacements["ASYNC_CONDITIONS"] = strings.Join(conditions, " || ")
	}

	result, err := createCommonHeader(ctx.p, mmapProg, replacements, ctx.opts)
	if err != nil {
		return nil, err
	}
	const header = "// autogenerated by syzkaller (https://github.com/google/syzkaller)\n\n"
	result = append([]byte(header), result...)
	result = ctx.postProcess(result)
	return result, nil
}
   157  
// This is a kludge, but we keep it here until a better approach is implemented.
// TODO: untie syz_emit_ethernet/syz_extract_tcp_res and NetInjection. And also
// untie VhciInjection and syz_emit_vhci. Then we could remove this method.
//
// filterCalls removes injection pseudo-syscalls that the selected options do
// not enable. The program is cloned lazily: ctx.p is only copied if at least
// one call actually has to be removed.
func (ctx *context) filterCalls() {
	p := ctx.p
	for i := 0; i < len(p.Calls); {
		call := p.Calls[i]
		callName := call.Meta.CallName
		// Keep the call if the matching injection option is enabled, or if it
		// is not one of the injection pseudo-syscalls at all.
		emitCall := (ctx.opts.NetInjection ||
			callName != "syz_emit_ethernet" &&
				callName != "syz_extract_tcp_res") &&
			(ctx.opts.VhciInjection || callName != "syz_emit_vhci")
		if emitCall {
			i++
			continue
		}
		// Remove the call.
		if ctx.p == p {
			// We lazily clone the program to avoid unnecessary copying.
			p = ctx.p.Clone()
		}
		p.RemoveCall(i)
	}
	ctx.p = p
}
   183  
   184  func (ctx *context) generateSyscalls(calls []string, hasVars bool) string {
   185  	opts := ctx.opts
   186  	buf := new(bytes.Buffer)
   187  	if !opts.Threaded && !opts.Collide {
   188  		if len(calls) > 0 && (hasVars || opts.Trace) {
   189  			fmt.Fprintf(buf, "\tintptr_t res = 0;\n")
   190  		}
   191  		fmt.Fprintf(buf, "\tif (write(1, \"executing program\\n\", sizeof(\"executing program\\n\") - 1)) {}\n")
   192  		if opts.Trace {
   193  			fmt.Fprintf(buf, "\tfprintf(stderr, \"### start\\n\");\n")
   194  		}
   195  		for _, c := range calls {
   196  			fmt.Fprintf(buf, "%s", c)
   197  		}
   198  	} else if len(calls) > 0 {
   199  		if hasVars || opts.Trace {
   200  			fmt.Fprintf(buf, "\tintptr_t res = 0;\n")
   201  		}
   202  		fmt.Fprintf(buf, "\tswitch (call) {\n")
   203  		for i, c := range calls {
   204  			fmt.Fprintf(buf, "\tcase %v:\n", i)
   205  			fmt.Fprintf(buf, "%s", strings.Replace(c, "\t", "\t\t", -1))
   206  			fmt.Fprintf(buf, "\t\tbreak;\n")
   207  		}
   208  		fmt.Fprintf(buf, "\t}\n")
   209  	}
   210  	return buf.String()
   211  }
   212  
   213  func (ctx *context) generateSyscallDefines() string {
   214  	var calls []string
   215  	for name, nr := range ctx.calls {
   216  		if !ctx.sysTarget.HasCallNumber(name) || !ctx.sysTarget.NeedSyscallDefine(nr) {
   217  			continue
   218  		}
   219  		calls = append(calls, name)
   220  	}
   221  	sort.Strings(calls)
   222  	buf := new(bytes.Buffer)
   223  	prefix := ctx.sysTarget.SyscallPrefix
   224  	for _, name := range calls {
   225  		fmt.Fprintf(buf, "#ifndef %v%v\n", prefix, name)
   226  		fmt.Fprintf(buf, "#define %v%v %v\n", prefix, name, ctx.calls[name])
   227  		fmt.Fprintf(buf, "#endif\n")
   228  	}
   229  	if ctx.target.OS == targets.Linux && ctx.target.PtrSize == 4 {
   230  		// This is a dirty hack.
   231  		// On 32-bit linux mmap translated to old_mmap syscall which has a different signature.
   232  		// mmap2 has the right signature. syz-extract translates mmap to mmap2, do the same here.
   233  		fmt.Fprintf(buf, "#undef __NR_mmap\n")
   234  		fmt.Fprintf(buf, "#define __NR_mmap __NR_mmap2\n")
   235  	}
   236  	return buf.String()
   237  }
   238  
   239  func (ctx *context) generateProgCalls(p *prog.Prog, trace bool) ([]string, []uint64, error) {
   240  	exec, err := p.SerializeForExec()
   241  	if err != nil {
   242  		return nil, nil, fmt.Errorf("failed to serialize program: %w", err)
   243  	}
   244  	decoded, err := ctx.target.DeserializeExec(exec, nil)
   245  	if err != nil {
   246  		return nil, nil, err
   247  	}
   248  	calls, vars := ctx.generateCalls(decoded, trace)
   249  	return calls, vars, nil
   250  }
   251  
// generateCalls renders one C snippet per call of the decoded program:
// copyin of arguments, optional fault injection, the call invocation itself
// (plus reruns), and copyout of results. It returns the snippets and the
// program's result variables.
func (ctx *context) generateCalls(p prog.ExecProg, trace bool) ([]string, []uint64) {
	var calls []string
	// csumSeq gives checksum temporaries unique names across the whole program.
	csumSeq := 0
	for ci, call := range p.Calls {
		w := new(bytes.Buffer)
		// Copyin.
		for _, copyin := range call.Copyin {
			ctx.copyin(w, &csumSeq, copyin)
		}

		if call.Props.FailNth > 0 {
			fmt.Fprintf(w, "\tinject_fault(%v);\n", call.Props.FailNth)
		}
		// Call itself.
		resCopyout := call.Index != prog.ExecNoCopyout
		argCopyout := len(call.Copyout) != 0

		ctx.emitCall(w, call, ci, resCopyout || argCopyout, trace)

		if call.Props.Rerun > 0 {
			fmt.Fprintf(w, "\tfor (int i = 0; i < %v; i++) {\n", call.Props.Rerun)
			// Rerun invocations should not affect the result value.
			ctx.emitCall(w, call, ci, false, false)
			fmt.Fprintf(w, "\t}\n")
		}
		// Copyout.
		if resCopyout || argCopyout {
			ctx.copyout(w, call, resCopyout)
		}
		calls = append(calls, w.String())
	}
	return calls, p.Vars
}
   285  
   286  func isNative(sysTarget *targets.Target, callName string) bool {
   287  	_, trampoline := sysTarget.SyscallTrampolines[callName]
   288  	return sysTarget.HasCallNumber(callName) && !trampoline
   289  }
   290  
// emitCall writes the C statement invoking a single call. ci is the call index
// (used only for trace output), haveCopyout tells whether the result must be
// assigned into res, and trace enables the "### call=..." stderr line emitted
// after the call.
func (ctx *context) emitCall(w *bytes.Buffer, call prog.ExecCall, ci int, haveCopyout, trace bool) {
	native := isNative(ctx.sysTarget, call.Meta.CallName)
	fmt.Fprintf(w, "\t")
	if !native {
		// This mimics the same as executor does for execute_syscall,
		// but only for non-native syscalls to reduce clutter (native syscalls are assumed to not crash).
		// Arrange for res = -1 in case of syscall abort, we care about errno only if we are tracing for pkg/runtest.
		if haveCopyout || trace {
			fmt.Fprintf(w, "res = -1;\n\t")
		}
		if trace {
			fmt.Fprintf(w, "errno = EFAULT;\n\t")
		}
		fmt.Fprintf(w, "NONFAILING(")
	}
	if haveCopyout || trace {
		fmt.Fprintf(w, "res = ")
	}
	w.WriteString(ctx.fmtCallBody(call))
	if !native {
		fmt.Fprintf(w, ")") // close NONFAILING macro
	}
	fmt.Fprintf(w, ";")
	// Append a human-readable comment describing the call, if one is available.
	comment := ctx.target.AnnotateCall(call)
	if comment != "" {
		fmt.Fprintf(w, " /* %s */", comment)
	}
	fmt.Fprintf(w, "\n")
	if trace {
		cast := ""
		if !native && !strings.HasPrefix(call.Meta.CallName, "syz_") {
			// Potentially we casted a function returning int to a function returning intptr_t.
			// So instead of intptr_t -1 we can get 0x00000000ffffffff. Sign extend it to intptr_t.
			cast = "(intptr_t)(int)"
		}
		fmt.Fprintf(w, "\tfprintf(stderr, \"### call=%v errno=%%u\\n\", %vres == -1 ? errno : 0);\n", ci, cast)
	}
}
   329  
// fmtCallBody formats the invocation expression of a call:
// syscall(__NR_foo, args...) for native syscalls, a direct call for syz_*
// pseudo-syscalls, or an invocation through a casted function pointer for
// libc-style calls. Each argument is prefixed with a /*name=value*/ comment.
func (ctx *context) fmtCallBody(call prog.ExecCall) string {
	native := isNative(ctx.sysTarget, call.Meta.CallName)
	// Some calls are redirected to a differently-named trampoline function.
	callName, ok := ctx.sysTarget.SyscallTrampolines[call.Meta.CallName]
	if !ok {
		callName = call.Meta.CallName
	}
	argsStrs := []string{}
	funcName := ""
	if native {
		funcName = "syscall"
		argsStrs = append(argsStrs, ctx.sysTarget.SyscallPrefix+callName)
	} else if strings.HasPrefix(callName, "syz_") {
		funcName = callName
	} else {
		// Non-native, non-syz call: build a function-pointer cast taking
		// intptr_t for every argument (including the missing ones).
		args := strings.Repeat(",intptr_t", len(call.Args)+call.Meta.MissingArgs)
		if args != "" {
			args = args[1:]
		}
		funcName = fmt.Sprintf("((intptr_t(*)(%v))CAST(%v))", args, callName)
	}
	for i, arg := range call.Args {
		switch arg := arg.(type) {
		case prog.ExecArgConst:
			if arg.Format != prog.FormatNative && arg.Format != prog.FormatBigEndian {
				panic("string format in syscall argument")
			}
			com := ctx.argComment(call.Meta.Args[i], arg)
			suf := ctx.literalSuffix(arg, native)
			argsStrs = append(argsStrs, com+handleBigEndian(arg, ctx.constArgToStr(arg, suf)))
		case prog.ExecArgResult:
			if arg.Format != prog.FormatNative && arg.Format != prog.FormatBigEndian {
				panic("string format in syscall argument")
			}
			com := ctx.argComment(call.Meta.Args[i], arg)
			val := ctx.resultArgToStr(arg)
			if native && ctx.target.PtrSize == 4 {
				// syscall accepts args as ellipsis, resources are uint64
				// and take 2 slots without the cast, which would be wrong.
				val = "(intptr_t)" + val
			}
			argsStrs = append(argsStrs, com+val)
		default:
			panic(fmt.Sprintf("unknown arg type: %+v", arg))
		}
	}
	// Pad the argument list with zeros for arguments syzkaller does not describe.
	for i := 0; i < call.Meta.MissingArgs; i++ {
		argsStrs = append(argsStrs, "0")
	}
	return fmt.Sprintf("%v(%v)", funcName, strings.Join(argsStrs, ", "))
}
   380  
// generateCsumInet emits C code that computes an inet checksum over the given
// chunks and stores the 16-bit digest at addr. csumSeq makes the generated
// temporary variable names unique within the program.
func (ctx *context) generateCsumInet(w *bytes.Buffer, addr uint64, arg prog.ExecArgCsum, csumSeq int) {
	fmt.Fprintf(w, "\tstruct csum_inet csum_%d;\n", csumSeq)
	fmt.Fprintf(w, "\tcsum_inet_init(&csum_%d);\n", csumSeq)
	for i, chunk := range arg.Chunks {
		switch chunk.Kind {
		case prog.ExecArgCsumChunkData:
			// Data chunk: chunk.Value holds the address of the bytes to checksum.
			fmt.Fprintf(w, "\tNONFAILING(csum_inet_update(&csum_%d, (const uint8*)0x%x, %d));\n",
				csumSeq, chunk.Value, chunk.Size)
		case prog.ExecArgCsumChunkConst:
			// Const chunk: materialize the value in a local and checksum its bytes.
			fmt.Fprintf(w, "\tuint%d csum_%d_chunk_%d = 0x%x;\n",
				chunk.Size*8, csumSeq, i, chunk.Value)
			fmt.Fprintf(w, "\tcsum_inet_update(&csum_%d, (const uint8*)&csum_%d_chunk_%d, %d);\n",
				csumSeq, csumSeq, i, chunk.Size)
		default:
			panic(fmt.Sprintf("unknown checksum chunk kind %v", chunk.Kind))
		}
	}
	fmt.Fprintf(w, "\tNONFAILING(*(uint16*)0x%x = csum_inet_digest(&csum_%d));\n",
		addr, csumSeq)
}
   401  
// copyin emits the C statement(s) that write one argument value into the
// program's data area at copyin.Addr, dispatching on the argument kind:
// plain constants, bitfields, results of previous calls, raw data blobs,
// and checksums.
func (ctx *context) copyin(w *bytes.Buffer, csumSeq *int, copyin prog.ExecCopyin) {
	switch arg := copyin.Arg.(type) {
	case prog.ExecArgConst:
		if arg.BitfieldOffset == 0 && arg.BitfieldLength == 0 {
			ctx.copyinVal(w, copyin.Addr, arg.Size, handleBigEndian(arg, ctx.constArgToStr(arg, "")), arg.Format)
		} else {
			// Bitfield value: stored via the STORE_BY_BITMASK macro.
			if arg.Format != prog.FormatNative && arg.Format != prog.FormatBigEndian {
				panic("bitfield+string format")
			}
			htobe := ""
			if ctx.target.LittleEndian && arg.Format == prog.FormatBigEndian {
				htobe = fmt.Sprintf("htobe%v", arg.Size*8)
			}
			bitfieldOffset := arg.BitfieldOffset
			if !ctx.target.LittleEndian {
				// On big-endian targets the bitfield offset is counted from
				// the opposite end of the storage unit.
				bitfieldOffset = arg.Size*8 - arg.BitfieldOffset - arg.BitfieldLength
			}
			fmt.Fprintf(w, "\tNONFAILING(STORE_BY_BITMASK(uint%v, %v, 0x%x, %v, %v, %v));\n",
				arg.Size*8, htobe, copyin.Addr, ctx.constArgToStr(arg, ""),
				bitfieldOffset, arg.BitfieldLength)
		}
	case prog.ExecArgResult:
		ctx.copyinVal(w, copyin.Addr, arg.Size, ctx.resultArgToStr(arg), arg.Format)
	case prog.ExecArgData:
		// Use memset when the blob is a single repeated byte, memcpy otherwise.
		if bytes.Equal(arg.Data, bytes.Repeat(arg.Data[:1], len(arg.Data))) {
			fmt.Fprintf(w, "\tNONFAILING(memset((void*)0x%x, %v, %v));\n",
				copyin.Addr, arg.Data[0], len(arg.Data))
		} else {
			fmt.Fprintf(w, "\tNONFAILING(memcpy((void*)0x%x, \"%s\", %v));\n",
				copyin.Addr, toCString(arg.Data, arg.Readable), len(arg.Data))
		}
	case prog.ExecArgCsum:
		switch arg.Kind {
		case prog.ExecArgCsumInet:
			*csumSeq++
			ctx.generateCsumInet(w, copyin.Addr, arg, *csumSeq)
		default:
			panic(fmt.Sprintf("unknown csum kind %v", arg.Kind))
		}
	default:
		panic(fmt.Sprintf("bad argument type: %+v", arg))
	}
}
   445  
   446  func (ctx *context) copyinVal(w *bytes.Buffer, addr, size uint64, val string, bf prog.BinaryFormat) {
   447  	switch bf {
   448  	case prog.FormatNative, prog.FormatBigEndian:
   449  		fmt.Fprintf(w, "\tNONFAILING(*(uint%v*)0x%x = %v);\n", size*8, addr, val)
   450  	case prog.FormatStrDec:
   451  		if size != 20 {
   452  			panic("bad strdec size")
   453  		}
   454  		fmt.Fprintf(w, "\tNONFAILING(sprintf((char*)0x%x, \"%%020llu\", (long long)%v));\n", addr, val)
   455  	case prog.FormatStrHex:
   456  		if size != 18 {
   457  			panic("bad strdec size")
   458  		}
   459  		fmt.Fprintf(w, "\tNONFAILING(sprintf((char*)0x%x, \"0x%%016llx\", (long long)%v));\n", addr, val)
   460  	case prog.FormatStrOct:
   461  		if size != 23 {
   462  			panic("bad strdec size")
   463  		}
   464  		fmt.Fprintf(w, "\tNONFAILING(sprintf((char*)0x%x, \"%%023llo\", (long long)%v));\n", addr, val)
   465  	default:
   466  		panic("unknown binary format")
   467  	}
   468  }
   469  
// copyout emits the C code that, on call success, saves the call's return
// value (when resCopyout is set) and/or copies result values out of memory
// into the r[] array.
func (ctx *context) copyout(w *bytes.Buffer, call prog.ExecCall, resCopyout bool) {
	if ctx.sysTarget.OS == targets.Fuchsia {
		// On fuchsia we have real system calls that return ZX_OK on success,
		// and libc calls that are casted to function returning intptr_t,
		// as the result int -1 is returned as 0x00000000ffffffff rather than full -1.
		if strings.HasPrefix(call.Meta.CallName, "zx_") {
			fmt.Fprintf(w, "\tif (res == ZX_OK)")
		} else {
			fmt.Fprintf(w, "\tif ((int)res != -1)")
		}
	} else {
		fmt.Fprintf(w, "\tif (res != -1)")
	}
	// Braces are only needed when the if body has more than one statement.
	copyoutMultiple := len(call.Copyout) > 1 || resCopyout && len(call.Copyout) > 0
	if copyoutMultiple {
		fmt.Fprintf(w, " {")
	}
	fmt.Fprintf(w, "\n")
	if resCopyout {
		fmt.Fprintf(w, "\t\tr[%v] = res;\n", call.Index)
	}
	for _, copyout := range call.Copyout {
		fmt.Fprintf(w, "\t\tNONFAILING(r[%v] = *(uint%v*)0x%x);\n",
			copyout.Index, copyout.Size*8, copyout.Addr)
	}
	if copyoutMultiple {
		fmt.Fprintf(w, "\t}\n")
	}
}
   499  
   500  func (ctx *context) factorizeAsFlags(value uint64, flags []string, attemptsLeft *int) ([]string, uint64) {
   501  	if len(flags) == 0 || value == 0 || *attemptsLeft == 0 {
   502  		return nil, value
   503  	}
   504  
   505  	*attemptsLeft -= 1
   506  	currentFlag := flags[0]
   507  	subset, remainder := ctx.factorizeAsFlags(value, flags[1:], attemptsLeft)
   508  
   509  	if flagMask, ok := ctx.p.Target.ConstMap[currentFlag]; ok && (value&flagMask == flagMask) {
   510  		subsetIfTaken, remainderIfTaken := ctx.factorizeAsFlags(value & ^flagMask, flags[1:], attemptsLeft)
   511  		subsetIfTaken = append(subsetIfTaken, currentFlag)
   512  
   513  		bits, bitsIfTaken := bits.OnesCount64(remainder), bits.OnesCount64(remainderIfTaken)
   514  		if (bitsIfTaken < bits) || (bits == bitsIfTaken && len(subsetIfTaken) < len(subset)) {
   515  			return subsetIfTaken, remainderIfTaken
   516  		}
   517  	}
   518  
   519  	return subset, remainder
   520  }
   521  
// prettyPrintValue renders a constant argument as a |-joined set of flag names
// (plus a hex remainder for unmatched bits). It returns "" when the field's
// type has no associated flags or when no flags match the value.
func (ctx *context) prettyPrintValue(field prog.Field, arg prog.ExecArgConst) string {
	// Truncate the value to the argument's size in bits.
	mask := (uint64(1) << (arg.Size * 8)) - 1
	v := arg.Value & mask

	f := ctx.p.Target.FlagsMap[field.Type.Name()]
	if len(f) == 0 {
		return ""
	}

	// Bound the exponential factorization search.
	maxFactorizationAttempts := 256
	flags, remainder := ctx.factorizeAsFlags(v, f, &maxFactorizationAttempts)
	if len(flags) == 0 {
		return ""
	}
	if remainder != 0 {
		flags = append(flags, fmt.Sprintf("0x%x", remainder))
	}

	return strings.Join(flags, "|")
}
   542  
   543  func (ctx *context) argComment(field prog.Field, arg prog.ExecArg) string {
   544  	val := ""
   545  	constArg, isConstArg := arg.(prog.ExecArgConst)
   546  	if isConstArg {
   547  		val = ctx.prettyPrintValue(field, constArg)
   548  	}
   549  
   550  	return "/*" + field.Name + "=" + val + "*/"
   551  }
   552  
   553  func (ctx *context) constArgToStr(arg prog.ExecArgConst, suffix string) string {
   554  	mask := (uint64(1) << (arg.Size * 8)) - 1
   555  	v := arg.Value & mask
   556  	val := ""
   557  	if v == ^uint64(0)&mask {
   558  		val = "-1"
   559  	} else if v >= 10 {
   560  		val = fmt.Sprintf("0x%x%s", v, suffix)
   561  	} else {
   562  		val = fmt.Sprintf("%d%s", v, suffix)
   563  	}
   564  	if ctx.opts.Procs > 1 && arg.PidStride != 0 {
   565  		val += fmt.Sprintf(" + procid*%v", arg.PidStride)
   566  	}
   567  	return val
   568  }
   569  
   570  func (ctx *context) literalSuffix(arg prog.ExecArgConst, native bool) string {
   571  	if native && arg.Size == 8 {
   572  		// syscall() is variadic, so constant arguments must be explicitly
   573  		// promoted. Otherwise the compiler is free to leave garbage in the
   574  		// upper 32 bits of the argument value. In practice this can happen
   575  		// on amd64 with arguments that are passed on the stack, i.e.,
   576  		// arguments beyond the first six. For example, on freebsd/amd64,
   577  		// syscall(SYS_mmap, ..., 0) causes clang to emit a 32-bit store of
   578  		// 0 to the stack, but the kernel expects a 64-bit value.
   579  		//
   580  		// syzkaller's argument type representations do not always match
   581  		// the OS ABI. For instance, "flags" is always 64 bits wide on 64-bit
   582  		// platforms, but is a 32-bit value ("unsigned int" or so) in many
   583  		// cases. Thus, we assume here that passing a 64-bit argument where
   584  		// a 32-bit argument is expected won't break anything. On amd64
   585  		// this should be fine: arguments are passed in 64-bit registers or
   586  		// at 64 bit-aligned addresses on the stack.
   587  		if ctx.target.PtrSize == 4 {
   588  			return "ull"
   589  		} else {
   590  			return "ul"
   591  		}
   592  	}
   593  	return ""
   594  }
   595  
   596  func handleBigEndian(arg prog.ExecArgConst, val string) string {
   597  	if arg.Format == prog.FormatBigEndian {
   598  		return fmt.Sprintf("htobe%v(%v)", arg.Size*8, val)
   599  	}
   600  	return val
   601  }
   602  
   603  func (ctx *context) resultArgToStr(arg prog.ExecArgResult) string {
   604  	res := fmt.Sprintf("r[%v]", arg.Index)
   605  	if arg.DivOp != 0 {
   606  		res = fmt.Sprintf("%v/%v", res, arg.DivOp)
   607  	}
   608  	if arg.AddOp != 0 {
   609  		res = fmt.Sprintf("%v+%v", res, arg.AddOp)
   610  	}
   611  	if arg.Format == prog.FormatBigEndian {
   612  		res = fmt.Sprintf("htobe%v(%v)", arg.Size*8, res)
   613  	}
   614  	return res
   615  }
   616  
   617  func (ctx *context) postProcess(result []byte) []byte {
   618  	// Remove NONFAILING, debug, fail, etc calls.
   619  	if !ctx.opts.HandleSegv {
   620  		result = regexp.MustCompile(`\t*NONFAILING\((.*)\);\n`).ReplaceAll(result, []byte("$1;\n"))
   621  	}
   622  	result = bytes.Replace(result, []byte("NORETURN"), nil, -1)
   623  	result = bytes.Replace(result, []byte("doexit("), []byte("exit("), -1)
   624  	// TODO: Figure out what would be the right replacement for doexit_thread().
   625  	result = bytes.Replace(result, []byte("doexit_thread("), []byte("exit("), -1)
   626  	result = regexp.MustCompile(`PRINTF\(.*?\)`).ReplaceAll(result, nil)
   627  	result = regexp.MustCompile(`\t*debug\((.*\n)*?.*\);\n`).ReplaceAll(result, nil)
   628  	result = regexp.MustCompile(`\t*debug_dump_data\((.*\n)*?.*\);\n`).ReplaceAll(result, nil)
   629  	result = regexp.MustCompile(`\t*exitf\((.*\n)*?.*\);\n`).ReplaceAll(result, []byte("\texit(1);\n"))
   630  	result = regexp.MustCompile(`\t*fail(msg)?\((.*\n)*?.*\);\n`).ReplaceAll(result, []byte("\texit(1);\n"))
   631  
   632  	result = ctx.hoistIncludes(result)
   633  	result = ctx.removeEmptyLines(result)
   634  	return result
   635  }
   636  
// hoistIncludes moves all includes to the top, removes dups and sorts.
func (ctx *context) hoistIncludes(result []byte) []byte {
	includesStart := bytes.Index(result, []byte("#include"))
	if includesStart == -1 {
		return result
	}
	// Collect the set of unique include lines and strip them from the body.
	includes := make(map[string]bool)
	includeRe := regexp.MustCompile("#include <.*>\n")
	for _, match := range includeRe.FindAll(result, -1) {
		includes[string(match)] = true
	}
	result = includeRe.ReplaceAll(result, nil)
	// Certain linux and bsd headers are broken and go to the bottom.
	var sorted, sortedBottom, sortedTop []string
	for include := range includes {
		if strings.Contains(include, "<linux/") {
			sortedBottom = append(sortedBottom, include)
		} else if strings.Contains(include, "<netinet/if_ether.h>") {
			sortedBottom = append(sortedBottom, include)
		} else if ctx.target.OS == targets.FreeBSD && strings.Contains(include, "<sys/types.h>") {
			// FreeBSD's <sys/types.h> is hoisted above all other includes.
			sortedTop = append(sortedTop, include)
		} else {
			sorted = append(sorted, include)
		}
	}
	sort.Strings(sortedTop)
	sort.Strings(sorted)
	sort.Strings(sortedBottom)
	// Re-insert the three sorted groups at the position of the first include;
	// surplus blank lines are collapsed later by removeEmptyLines.
	newResult := append([]byte{}, result[:includesStart]...)
	newResult = append(newResult, strings.Join(sortedTop, "")...)
	newResult = append(newResult, '\n')
	newResult = append(newResult, strings.Join(sorted, "")...)
	newResult = append(newResult, '\n')
	newResult = append(newResult, strings.Join(sortedBottom, "")...)
	newResult = append(newResult, result[includesStart:]...)
	return newResult
}
   674  
   675  // removeEmptyLines removes duplicate new lines.
   676  func (ctx *context) removeEmptyLines(result []byte) []byte {
   677  	for {
   678  		newResult := bytes.Replace(result, []byte{'\n', '\n', '\n'}, []byte{'\n', '\n'}, -1)
   679  		newResult = bytes.Replace(newResult, []byte{'\n', '\n', '\t'}, []byte{'\n', '\t'}, -1)
   680  		newResult = bytes.Replace(newResult, []byte{'\n', '\n', ' '}, []byte{'\n', ' '}, -1)
   681  		if len(newResult) == len(result) {
   682  			return result
   683  		}
   684  		result = newResult
   685  	}
   686  }
   687  
   688  func toCString(data []byte, readable bool) []byte {
   689  	if len(data) == 0 {
   690  		panic("empty data arg")
   691  	}
   692  	buf := new(bytes.Buffer)
   693  	prog.EncodeData(buf, data, readable)
   694  	return buf.Bytes()
   695  }