github.com/google/syzkaller@v0.0.0-20251211124644-a066d2bc4b02/prog/kfuzztest.go (about)

     1  // Copyright 2025 syzkaller project authors. All rights reserved.
     2  // Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
     3  package prog
     4  
     5  import (
     6  	"bytes"
     7  	"encoding/binary"
     8  	"fmt"
     9  )
    10  
const (
	// Destination-region ID written into a relocation entry for a NULL
	// pointer (all-ones sentinel).
	kFuzzTestRegionIDNull uint32 = ^uint32(0)
	// Number of poison (redzone) bytes required after each encoded region.
	kFuzzTestPoisonSize   uint64 = 0x8

	// Format version number, packed into the high half of the 8-byte prefix.
	kFuzzTestVersion    uint32 = 0
	// Magic value identifying a KFuzzTest input, in the low half of the prefix.
	kFuzzTestMagic      uint32 = 0xBFACE
	// Size in bytes of the prefix (magic + version).
	kFuzzTestPrefixSize        = 8

	// Minimum region alignment required by KFuzzTest. This is exposed by the
	// /sys/kernel/debug/kfuzztest/_config/minalign debugfs file. This value
	// always equals MAX(ARCH_KMALLOC_MINALIGN, KFUZZTEST_POISON_SIZE) = 8 on
	// x86_64, so we hardcode it for now. A more robust solution would involve
	// reading this from the debugfs entry at boot before fuzzing begins.
	kFuzzTestMinalign uint64 = 8

	// Maximum input size accepted by the KFuzzTest kernel module.
	KFuzzTestMaxInputSize uint64 = 64 << 10
)
    29  
    30  func kFuzzTestWritePrefix(buf *bytes.Buffer) {
    31  	prefix := (uint64(kFuzzTestVersion) << 32) | uint64(kFuzzTestMagic)
    32  	binary.Write(buf, binary.LittleEndian, prefix)
    33  }
    34  
    35  func isPowerOfTwo(n uint64) bool {
    36  	return n > 0 && (n&(n-1) == 0)
    37  }
    38  
    39  func roundUpPowerOfTwo(x, n uint64) uint64 {
    40  	if !isPowerOfTwo(n) {
    41  		panic("n was not a power of 2")
    42  	}
    43  	return (x + n - 1) &^ (n - 1)
    44  }
    45  
    46  // Pad b so that it's length is a multiple of alignment, with at least
    47  // minPadding bytes of padding, where alignment is a power of 2.
    48  func padWithAlignment(b *bytes.Buffer, alignment, minPadding uint64) {
    49  	var newSize uint64
    50  	if alignment == 0 {
    51  		newSize = uint64(b.Len()) + minPadding
    52  	} else {
    53  		newSize = roundUpPowerOfTwo(uint64(b.Len())+minPadding, alignment)
    54  	}
    55  
    56  	paddingBytes := newSize - uint64(b.Len())
    57  	for range paddingBytes {
    58  		b.WriteByte(byte(0))
    59  	}
    60  }
    61  
    62  type sliceQueue[T any] struct {
    63  	q []T
    64  }
    65  
    66  func (sq *sliceQueue[T]) push(elem T) {
    67  	sq.q = append(sq.q, elem)
    68  }
    69  
    70  func (sq *sliceQueue[T]) pop() T {
    71  	ret := sq.q[0]
    72  	sq.q = sq.q[1:]
    73  	return ret
    74  }
    75  
    76  func (sq *sliceQueue[T]) isEmpty() bool {
    77  	return len(sq.q) == 0
    78  }
    79  
    80  func newSliceQueue[T any]() *sliceQueue[T] {
    81  	return &sliceQueue[T]{q: make([]T, 0)}
    82  }
    83  
// kFuzzTestRelocation records a pointer discovered during encoding: the
// pointer lives at byte offset `offset` inside the region rooted at
// srcRegion and points to the region rooted at dstRegion (nil for a NULL
// pointer).
type kFuzzTestRelocation struct {
	offset    uint32
	srcRegion Arg
	dstRegion Arg
}
    89  
// kFuzzTestRegion describes one logical memory region of the encoded input:
// its byte offset from the start of the payload section and its size. A
// region's ID is its index in the region array.
type kFuzzTestRegion struct {
	offset uint32
	size   uint32
}
    94  
// The following helpers and definitions follow directly from the C-struct
// definitions in <include/linux/kfuzztest.h>.
//
// Encoded size in bytes of one region descriptor (two uint32 fields).
const kFuzzTestRegionSize = 8
    98  
    99  func kFuzzTestRegionArraySize(numRegions int) int {
   100  	return 4 + kFuzzTestRegionSize*numRegions
   101  }
   102  
   103  func kFuzzTestWriteRegion(buf *bytes.Buffer, region kFuzzTestRegion) {
   104  	binary.Write(buf, binary.LittleEndian, region.offset)
   105  	binary.Write(buf, binary.LittleEndian, region.size)
   106  }
   107  
   108  func kFuzzTestWriteRegionArray(buf *bytes.Buffer, regions []kFuzzTestRegion) {
   109  	binary.Write(buf, binary.LittleEndian, uint32(len(regions)))
   110  	for _, reg := range regions {
   111  		kFuzzTestWriteRegion(buf, reg)
   112  	}
   113  }
   114  
// Encoded size in bytes of one relocation entry (three uint32 fields).
const kFuzzTestRelocationSize = 12
   116  
   117  func kFuzzTestRelocTableSize(numRelocs int) int {
   118  	return 8 + kFuzzTestRelocationSize*numRelocs
   119  }
   120  
   121  func kFuzzTestWriteReloc(buf *bytes.Buffer, regToID *map[Arg]int, reloc kFuzzTestRelocation) {
   122  	binary.Write(buf, binary.LittleEndian, uint32((*regToID)[reloc.srcRegion]))
   123  	binary.Write(buf, binary.LittleEndian, reloc.offset)
   124  	if reloc.dstRegion == nil {
   125  		binary.Write(buf, binary.LittleEndian, kFuzzTestRegionIDNull)
   126  	} else {
   127  		binary.Write(buf, binary.LittleEndian, uint32((*regToID)[reloc.dstRegion]))
   128  	}
   129  }
   130  
   131  func kFuzzTestWriteRelocTable(buf *bytes.Buffer, regToID *map[Arg]int,
   132  	relocations []kFuzzTestRelocation, paddingBytes uint64) {
   133  	binary.Write(buf, binary.LittleEndian, uint32(len(relocations)))
   134  	binary.Write(buf, binary.LittleEndian, uint32(paddingBytes))
   135  	for _, reloc := range relocations {
   136  		kFuzzTestWriteReloc(buf, regToID, reloc)
   137  	}
   138  	buf.Write(make([]byte, paddingBytes))
   139  }
   140  
// Placeholder value written where a pointer belongs in the payload; the
// kernel rewrites it using the relocation table, so it is never read as-is.
const kFuzzTestPlaceHolderPtr uint64 = 0xFFFFFFFFFFFFFFFF
   142  
// kFuzzTestExpandRegion flattens the argument tree rooted at reg into one
// contiguous byte region, traversing it breadth-first. It returns the
// encoded bytes together with the relocations for every pointer encountered
// (each relocation's srcRegion is reg itself, since the offset is relative
// to this region). Pointer slots are filled with kFuzzTestPlaceHolderPtr
// and patched later by the kernel via the relocation table.
func kFuzzTestExpandRegion(reg Arg) ([]byte, []kFuzzTestRelocation) {
	relocations := []kFuzzTestRelocation{}
	var encoded bytes.Buffer
	queue := newSliceQueue[Arg]()
	queue.push(reg)

	for !queue.isEmpty() {
		arg := queue.pop()
		// Align each field to its natural alignment within the region.
		padWithAlignment(&encoded, arg.Type().Alignment(), 0)

		switch a := arg.(type) {
		case *PointerArg:
			// Remember where the pointer slot starts so the relocation can
			// target it; a.Res is the pointee (nil for a NULL pointer).
			offset := uint32(encoded.Len())
			binary.Write(&encoded, binary.LittleEndian, kFuzzTestPlaceHolderPtr)
			relocations = append(relocations, kFuzzTestRelocation{offset, reg, a.Res})
		case *GroupArg:
			// Structs/arrays: enqueue members so they are encoded in order.
			for _, inner := range a.Inner {
				queue.push(inner)
			}
		case *DataArg:
			data := a.data
			buffer, ok := a.ArgCommon.Type().(*BufferType)
			if !ok {
				panic("DataArg should be a BufferType")
			}
			// Unlike length fields whose incorrectness can be prevented easily,
			// it is an invasive change to prevent generation of
			// non-null-terminated strings. Forcibly null-terminating them
			// during encoding allows us to centralize this easily and prevent
			// false positive buffer overflows in KFuzzTest targets.
			if buffer.Kind == BufferString && (len(data) == 0 || data[len(data)-1] != byte(0)) {
				data = append(data, byte(0))
			}
			encoded.Write(data)
		case *ConstArg:
			// Scalars are emitted little-endian at their declared width.
			val, _ := a.Value()
			switch a.Size() {
			case 1:
				binary.Write(&encoded, binary.LittleEndian, uint8(val))
			case 2:
				binary.Write(&encoded, binary.LittleEndian, uint16(val))
			case 4:
				binary.Write(&encoded, binary.LittleEndian, uint32(val))
			case 8:
				binary.Write(&encoded, binary.LittleEndian, val)
			default:
				panic(fmt.Sprintf("unsupported constant size: %d", a.Size()))
			}
			// TODO: handle union args.
		default:
			panic(fmt.Sprintf("tried to serialize unsupported type: %s", a.Type().Name()))
		}
	}

	return encoded.Bytes(), relocations
}
   200  
   201  // MarshallKFuzzTestArg serializes a syzkaller Arg into a flat binary format
   202  // understood by the KFuzzTest kernel interface (see `include/linux/kfuzztest.h`).
   203  //
   204  // The goal is to represent a tree-like structure of arguments (which may contain
   205  // pointers and cycles) as a single byte slice that the kernel can deserialize
   206  // into a set of distinct heap allocations.
   207  //
   208  // The binary format consists of three contiguous parts, in this order:
   209  //
   210  //  1. Region Array: A header describing all logical memory regions that will be
   211  //     allocated by the kernel. Each `relocRegion` defines a region's unique `id`,
   212  //     its `size`, its `alignment`, and its `start` offset within the payload.
   213  //     The kernel uses this table to create one distinct heap allocation per region.
   214  //
   215  //  2. Relocation Table: A header containing a list of `relocationEntry` structs.
   216  //     Each entry identifies the location of a pointer field within the payload
   217  //     (via a `regionID` and `regionOffset`) and maps it to the logical region
   218  //     it points to (via a `value` which holds the pointee's `regionID`).
   219  //     A NULL pointer is identified by the special value `kFuzzTestNilPtrVal`.
   220  //
   221  //  3. Payload: The raw, serialized data for all arguments, laid out as a single
   222  //     contiguous block of memory with padded regions as per the KFuzzTest input
   223  //     format's specification defined in `Documentation/dev-tools/kfuzztest.rst`.
   224  //
   225  // Cycles are handled by tracking visited arguments, ensuring that a region is
   226  // only visited and encoded once.
   227  //
   228  // For a concrete example of the final binary layout, see the test cases for this
   229  // function in `prog/kfuzztest_test.go`.
   230  func MarshallKFuzztestArg(topLevel Arg) []byte {
   231  	regions := []kFuzzTestRegion{}
   232  	allRelocations := []kFuzzTestRelocation{}
   233  	visitedRegions := make(map[Arg]int)
   234  	queue := newSliceQueue[Arg]()
   235  	var payload bytes.Buffer
   236  	queue.push(topLevel)
   237  	maxAlignment := uint64(8)
   238  
   239  	if topLevel == nil {
   240  		return []byte{}
   241  	}
   242  
   243  Loop:
   244  	for {
   245  		if queue.isEmpty() {
   246  			break Loop
   247  		}
   248  
   249  		reg := queue.pop()
   250  		if _, visited := visitedRegions[reg]; visited {
   251  			continue Loop
   252  		}
   253  
   254  		alignment := max(kFuzzTestMinalign, reg.Type().Alignment())
   255  		maxAlignment = max(maxAlignment, alignment)
   256  
   257  		regionData, relocations := kFuzzTestExpandRegion(reg)
   258  		for _, reloc := range relocations {
   259  			if reloc.dstRegion == nil {
   260  				continue
   261  			}
   262  			if _, visited := visitedRegions[reloc.dstRegion]; !visited {
   263  				queue.push(reloc.dstRegion)
   264  			}
   265  		}
   266  		allRelocations = append(allRelocations, relocations...)
   267  
   268  		padWithAlignment(&payload, alignment, 0)
   269  		regions = append(regions, kFuzzTestRegion{
   270  			offset: uint32(payload.Len()),
   271  			size:   uint32(len(regionData))},
   272  		)
   273  		visitedRegions[reg] = len(regions) - 1
   274  		payload.Write(regionData)
   275  		// The end of the payload should have at least kFuzzTestPoisonSize bytes
   276  		// of padding, and be aligned to kFuzzTestPoisonSize.
   277  		padWithAlignment(&payload, kFuzzTestPoisonSize, kFuzzTestPoisonSize)
   278  	}
   279  
   280  	headerLen := 0x8 // Two integer values - the magic value, and the version number.
   281  	regionArrayLen := kFuzzTestRegionArraySize(len(regions))
   282  	relocTableLen := kFuzzTestRelocTableSize(len(allRelocations))
   283  	metadataLen := headerLen + regionArrayLen + relocTableLen
   284  
   285  	// The payload needs to be aligned to max alignment to ensure that all
   286  	// nested structs are properly aligned, and there should be enough padding
   287  	// so that the region before the payload can be poisoned with a redzone.
   288  	paddingBytes := roundUpPowerOfTwo(uint64(metadataLen)+kFuzzTestPoisonSize, maxAlignment) - uint64(metadataLen)
   289  
   290  	var out bytes.Buffer
   291  	kFuzzTestWritePrefix(&out)
   292  	kFuzzTestWriteRegionArray(&out, regions)
   293  	kFuzzTestWriteRelocTable(&out, &visitedRegions, allRelocations, paddingBytes)
   294  	out.Write(payload.Bytes())
   295  	return out.Bytes()
   296  }