// Copyright 2015 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

package prog

import (
	"encoding/binary"
	"fmt"
	"math"
	"math/rand"
	"sort"

	"github.com/google/syzkaller/pkg/image"
)

// Maximum length of generated binary blobs inserted into the program.
const maxBlobLen = uint64(100 << 10)

// Mutate program p.
//
// p: The program to mutate.
// rs: Random source.
// ncalls: The allowed maximum calls in mutated program.
// ct: ChoiceTable for syscalls.
// noMutate: Set of IDs of syscalls which should not be mutated.
// corpus: The entire corpus, including original program p.
func (p *Prog) Mutate(rs rand.Source, ncalls int, ct *ChoiceTable, noMutate map[int]bool, corpus []*Prog) {
	p.MutateWithOpts(rs, ncalls, ct, noMutate, corpus, DefaultMutateOpts)
}

// DefaultMutateOpts is the default set of weights/counters used by Mutate.
var DefaultMutateOpts = MutateOpts{
	ExpectedIterations: 5,
	MutateArgCount:     3,

	SquashWeight:     50,
	SpliceWeight:     200,
	InsertWeight:     100,
	MutateArgWeight:  100,
	RemoveCallWeight: 10,
}

// MutateOpts tunes program mutation. The *Weight fields set the relative
// probability of picking the corresponding mutation strategy on each
// iteration of MutateWithOpts (out of their sum, see weight()).
type MutateOpts struct {
	// Expected number of successful mutation iterations per MutateWithOpts call
	// (the loop stops with probability 1/ExpectedIterations after each success).
	ExpectedIterations int
	// Expected number of argument mutations within a single mutateArg invocation.
	MutateArgCount int

	SquashWeight     int
	SpliceWeight     int
	InsertWeight     int
	MutateArgWeight  int
	RemoveCallWeight int
}

// weight returns the sum of all strategy weights, i.e. the denominator
// of the weighted strategy choice in MutateWithOpts.
func (o MutateOpts) weight() int {
	return o.SquashWeight + o.SpliceWeight + o.InsertWeight + o.MutateArgWeight + o.RemoveCallWeight
}

// MutateWithOpts is Mutate with explicit mutation options.
// It repeatedly picks one of five mutation strategies (squash, splice, insert
// call, mutate arg, remove call) with probability proportional to its weight,
// then sanitizes and validates the resulting program.
func (p *Prog) MutateWithOpts(rs rand.Source, ncalls int, ct *ChoiceTable, noMutate map[int]bool,
	corpus []*Prog, opts MutateOpts) {
	if p.isUnsafe {
		panic("mutation of unsafe programs is not supposed to be done")
	}
	totalWeight := opts.weight()
	r := newRand(p.Target, rs)
	// Never require the program to shrink below its current size.
	ncalls = max(ncalls, len(p.Calls))
	ctx := &mutator{
		p:        p,
		r:        r,
		ncalls:   ncalls,
		ct:       ct,
		noMutate: noMutate,
		corpus:   corpus,
		opts:     opts,
	}
	// Keep mutating until a mutation succeeds (ok) on a non-empty program,
	// and then stop with probability 1/ExpectedIterations after each
	// further success, giving ~ExpectedIterations successful mutations.
	for stop, ok := false, false; !stop; stop = ok && len(p.Calls) != 0 && r.oneOf(opts.ExpectedIterations) {
		val := r.Intn(totalWeight)
		val -= opts.SquashWeight
		if val < 0 {
			// Not all calls have anything squashable,
			// so this has lower priority in reality.
			ok = ctx.squashAny()
			continue
		}
		val -= opts.SpliceWeight
		if val < 0 {
			ok = ctx.splice()
			continue
		}
		val -= opts.InsertWeight
		if val < 0 {
			ok = ctx.insertCall()
			continue
		}
		val -= opts.MutateArgWeight
		if val < 0 {
			ok = ctx.mutateArg()
			continue
		}
		ok = ctx.removeCall()
	}
	p.sanitizeFix()
	p.debugValidate()
	if got := len(p.Calls); got < 1 || got > ncalls {
		panic(fmt.Sprintf("bad number of calls after mutation: %v, want [1, %v]", got, ncalls))
	}
}

// Internal state required for performing mutations -- currently this matches
// the arguments passed to Mutate().
type mutator struct {
	p        *Prog        // The program to mutate.
	r        *randGen     // The randGen instance.
	ncalls   int          // The allowed maximum calls in mutated program.
	ct       *ChoiceTable // ChoiceTable for syscalls.
	noMutate map[int]bool // Set of IDs of syscalls which should not be mutated.
	corpus   []*Prog      // The entire corpus, including original program p.
	opts     MutateOpts
}

// This function selects a random other program p0 out of the corpus, and
// mutates ctx.p as follows: preserve ctx.p's Calls up to a random index i
// (exclusive) concatenated with p0's calls from index i (inclusive).
func (ctx *mutator) splice() bool {
	p, r := ctx.p, ctx.r
	if len(ctx.corpus) == 0 || len(p.Calls) == 0 || len(p.Calls) >= ctx.ncalls {
		return false
	}
	p0 := ctx.corpus[r.Intn(len(ctx.corpus))]
	// Clone so that the spliced-in calls don't alias the corpus program.
	p0c := p0.Clone()
	idx := r.Intn(len(p.Calls))
	p.Calls = append(p.Calls[:idx], append(p0c.Calls, p.Calls[idx:]...)...)
	// Trim trailing calls to stay within the ncalls budget.
	for i := len(p.Calls) - 1; i >= ctx.ncalls; i-- {
		p.RemoveCall(i)
	}
	return true
}

// Picks a random complex pointer and squashes its arguments into an ANY.
// Subsequently, if the ANY contains blobs, mutates a random blob.
func (ctx *mutator) squashAny() bool {
	p, r := ctx.p, ctx.r
	complexPtrs := p.complexPtrs()
	if len(complexPtrs) == 0 {
		return false
	}
	ptr := complexPtrs[r.Intn(len(complexPtrs))]
	if ctx.noMutate[ptr.call.Meta.ID] {
		return false
	}
	if ptr.call.Meta.Attrs.NoSquash {
		return false
	}
	if !p.Target.isAnyPtr(ptr.arg.Type()) {
		p.Target.squashPtr(ptr.arg)
	}
	// Collect all mutable (non-output) blobs under the squashed pointer,
	// remembering the base pointer of each so it can be re-allocated below.
	var blobs []*DataArg
	var bases []*PointerArg
	ForeachSubArg(ptr.arg, func(arg Arg, ctx *ArgCtx) {
		if data, ok := arg.(*DataArg); ok && arg.Dir() != DirOut {
			blobs = append(blobs, data)
			bases = append(bases, ctx.Base)
		}
	})
	if len(blobs) == 0 {
		return false
	}
	// Note: we need to call analyze before we mutate the blob.
	// After mutation the blob can grow out of bounds of the data area
	// and analyze will crash with out-of-bounds access while marking existing allocations.
	s := analyze(ctx.ct, ctx.corpus, p, ptr.call)
	// TODO(dvyukov): we probably want special mutation for ANY.
	// E.g. merging adjacent ANYBLOBs (we don't create them,
	// but they can appear in future); or replacing ANYRES
	// with a blob (and merging it with adjacent blobs).
	idx := r.Intn(len(blobs))
	arg := blobs[idx]
	base := bases[idx]
	baseSize := base.Res.Size()
	arg.data = mutateData(r, arg.Data(), 0, maxBlobLen)
	// Update base pointer if size has increased.
	if baseSize < base.Res.Size() {
		newArg := r.allocAddr(s, base.Type(), base.Dir(), base.Res.Size(), base.Res)
		*base = *newArg
	}
	return true
}

// Inserts a new call at a randomly chosen point (with bias towards the end of
// existing program). Does not insert a call if program already has ncalls.
func (ctx *mutator) insertCall() bool {
	p, r := ctx.p, ctx.r
	if len(p.Calls) >= ctx.ncalls {
		return false
	}
	// biasedRand skews the insertion point towards the end of the program.
	idx := r.biasedRand(len(p.Calls)+1, 5)
	var c *Call
	if idx < len(p.Calls) {
		c = p.Calls[idx]
	}
	s := analyze(ctx.ct, ctx.corpus, p, c)
	calls := r.generateCall(s, p, idx)
	p.insertBefore(c, calls)
	// Trim back down if the insertion pushed us over the budget.
	for len(p.Calls) > ctx.ncalls {
		p.RemoveCall(idx)
	}
	return true
}

// Removes a random call from program.
func (ctx *mutator) removeCall() bool {
	p, r := ctx.p, ctx.r
	if len(p.Calls) == 0 {
		return false
	}
	idx := r.Intn(len(p.Calls))
	p.RemoveCall(idx)
	return true
}

// Mutate an argument of a random call.
func (ctx *mutator) mutateArg() bool {
	p, r := ctx.p, ctx.r
	if len(p.Calls) == 0 {
		return false
	}

	idx := chooseCall(p, r)
	if idx < 0 {
		return false
	}
	c := p.Calls[idx]
	if c.Meta.Attrs.KFuzzTest {
		// Temporarily switch the generator into KFuzzTest mode for this call;
		// restore the previous mode on return.
		tmp := r.genKFuzzTest
		r.genKFuzzTest = true
		defer func() {
			r.genKFuzzTest = tmp
		}()
	}
	if ctx.noMutate[c.Meta.ID] {
		return false
	}
	updateSizes := true
	// Perform ~MutateArgCount successful arg mutations (geometric stop).
	for stop, ok := false, false; !stop; stop = ok && r.oneOf(ctx.opts.MutateArgCount) {
		ok = true
		ma := &mutationArgs{target: p.Target, ignoreLengths: c.Meta.Attrs.KFuzzTest}
		ForeachArg(c, ma.collectArg)
		if len(ma.args) == 0 {
			return false
		}
		s := analyze(ctx.ct, ctx.corpus, p, c)
		arg, argCtx := ma.chooseArg(r.Rand)
		calls, ok1 := p.Target.mutateArg(r, s, arg, argCtx, &updateSizes)
		if !ok1 {
			// The chosen arg asked for a retry; pick another one.
			ok = false
			continue
		}
		moreCalls, fieldsPatched := r.patchConditionalFields(c, s)
		calls = append(calls, moreCalls...)
		p.insertBefore(c, calls)
		// Keep idx pointing at c while calls are inserted before it and
		// excess calls are trimmed from just before it.
		idx += len(calls)
		for len(p.Calls) > ctx.ncalls {
			idx--
			p.RemoveCall(idx)
		}
		if idx < 0 || idx >= len(p.Calls) || p.Calls[idx] != c {
			panic(fmt.Sprintf("wrong call index: idx=%v calls=%v p.Calls=%v ncalls=%v",
				idx, len(calls), len(p.Calls), ctx.ncalls))
		}
		if updateSizes || fieldsPatched {
			p.Target.assignSizesCall(c)
		}
	}
	return true
}

// Select a call based on the complexity of the arguments.
// Returns an index sampled proportionally to the per-call sum of argument
// mutation priorities (via prefix sums + binary search), or -1 if no call
// has any mutable arguments.
func chooseCall(p *Prog, r *randGen) int {
	var prioSum float64
	var callPriorities []float64
	for _, c := range p.Calls {
		var totalPrio float64
		ForeachArg(c, func(arg Arg, ctx *ArgCtx) {
			prio, stopRecursion := arg.Type().getMutationPrio(p.Target, arg, false, c.Meta.Attrs.KFuzzTest)
			totalPrio += prio
			ctx.Stop = stopRecursion
		})
		prioSum += totalPrio
		// callPriorities[i] is the cumulative priority of calls [0..i].
		callPriorities = append(callPriorities, prioSum)
	}
	if prioSum == 0 {
		return -1 // All calls are without arguments.
	}
	return sort.SearchFloat64s(callPriorities, prioSum*r.Float64())
}

// mutateArg dispatches to the type-specific mutate method for arg.
// Returns (calls, false) if the mutation should be retried with another arg.
// If the mutated inner value grew, the enclosing pointer is re-allocated
// to a sufficiently large address range.
func (target *Target) mutateArg(r *randGen, s *state, arg Arg, ctx ArgCtx, updateSizes *bool) ([]*Call, bool) {
	var baseSize uint64
	if ctx.Base != nil {
		baseSize = ctx.Base.Res.Size()
	}
	calls, retry, preserve := arg.Type().mutate(r, s, arg, ctx)
	if retry {
		return nil, false
	}
	if preserve {
		*updateSizes = false
	}
	// Update base pointer if size has increased.
	if base := ctx.Base; base != nil && baseSize < base.Res.Size() {
		newArg := r.allocAddr(s, base.Type(), base.Dir(), base.Res.Size(), base.Res)
		replaceArg(base, newArg)
	}
	return calls, true
}

// regenerate replaces arg with a freshly generated value of the same
// type and direction (plus any calls needed to construct it).
func regenerate(r *randGen, s *state, arg Arg) (calls []*Call, retry, preserve bool) {
	var newArg Arg
	newArg, calls = r.generateArg(s, arg.Type(), arg.Dir())
	replaceArg(arg, newArg)
	return
}

// mutateInt nudges the value up or down by 1..4, or flips a random bit.
func mutateInt(r *randGen, a *ConstArg, t *IntType) uint64 {
	switch {
	case r.nOutOf(1, 3):
		return a.Val + (uint64(r.Intn(4)) + 1)
	case r.nOutOf(1, 2):
		return a.Val - (uint64(r.Intn(4)) + 1)
	default:
		return a.Val ^ (1 << uint64(r.Intn(int(t.TypeBitSize()))))
	}
}

// mutateAlignedInt mutates in units of t.Align, keeping the value's
// misalignment and wrapping within [RangeBegin, rangeEnd].
func mutateAlignedInt(r *randGen, a *ConstArg, t *IntType) uint64 {
	rangeEnd := t.RangeEnd
	if t.RangeBegin == 0 && int64(rangeEnd) == -1 {
		// Special [0:-1] range for all possible values.
		rangeEnd = uint64(1<<t.TypeBitSize() - 1)
	}
	index := (a.Val - t.RangeBegin) / t.Align
	misalignment := (a.Val - t.RangeBegin) % t.Align
	switch {
	case r.nOutOf(1, 3):
		index += uint64(r.Intn(4)) + 1
	case r.nOutOf(1, 2):
		index -= uint64(r.Intn(4)) + 1
	default:
		index ^= 1 << uint64(r.Intn(int(t.TypeBitSize())))
	}
	lastIndex := (rangeEnd - t.RangeBegin) / t.Align
	index %= lastIndex + 1
	return t.RangeBegin + index*t.Align + misalignment
}

// mutate for IntType: half the time regenerates the value from scratch,
// otherwise perturbs the current value (respecting alignment if any).
func (t *IntType) mutate(r *randGen, s *state, arg Arg, ctx ArgCtx) (calls []*Call, retry, preserve bool) {
	if r.bin() {
		return regenerate(r, s, arg)
	}
	a := arg.(*ConstArg)
	if t.Align == 0 {
		a.Val = mutateInt(r, a, t)
	} else {
		a.Val = mutateAlignedInt(r, a, t)
	}
	a.Val = truncateToBitSize(a.Val, t.TypeBitSize())
	return
}

// mutate for FlagsType: re-rolls the flags until the value actually changes.
func (t *FlagsType) mutate(r *randGen, s *state, arg Arg, ctx ArgCtx) (calls []*Call, retry, preserve bool) {
	a := arg.(*ConstArg)
	for oldVal := a.Val; oldVal == a.Val; {
		a.Val = r.flags(t.Vals, t.BitMask, a.Val)
	}
	return
}

// mutate for LenType: delegates to mutateSize; preserve=true so that the
// caller does not recompute sizes and clobber the mutated length.
func (t *LenType) mutate(r *randGen, s *state, arg Arg, ctx ArgCtx) (calls []*Call, retry, preserve bool) {
	if !r.mutateSize(arg.(*ConstArg), *ctx.Parent, ctx.Fields) {
		retry = true
		return
	}
	preserve = true
	return
}

// mutate for ResourceType: always regenerated from scratch.
func (t *ResourceType) mutate(r *randGen, s *state, arg Arg, ctx ArgCtx) (calls []*Call, retry, preserve bool) {
	return regenerate(r, s, arg)
}

// mutate for VmaType: always regenerated from scratch.
func (t *VmaType) mutate(r *randGen, s *state, arg Arg, ctx ArgCtx) (calls []*Call, retry, preserve bool) {
	return regenerate(r, s, arg)
}

// mutate for ProcType: always regenerated from scratch.
func (t *ProcType) mutate(r *randGen, s *state, arg Arg, ctx ArgCtx) (calls []*Call, retry, preserve bool) {
	return regenerate(r, s, arg)
}

// mutate for BufferType: for output buffers only the size is mutated;
// for input buffers the contents are mutated according to the buffer kind.
func (t *BufferType) mutate(r *randGen, s *state, arg Arg, ctx ArgCtx) (calls []*Call, retry, preserve bool) {
	minLen, maxLen := uint64(0), maxBlobLen
	if t.Kind == BufferBlobRange {
		minLen, maxLen = t.RangeBegin, t.RangeEnd
	}
	a := arg.(*DataArg)
	if a.Dir() == DirOut {
		if t.Kind == BufferFilename && r.oneOf(100) {
			a.size = uint64(r.randFilenameLength())
		} else {
			mutateBufferSize(r, a, minLen, maxLen)
		}
		return
	}
	switch t.Kind {
	case BufferBlobRand, BufferBlobRange:
		// Copy the data first: mutateData mutates in place and may grow it.
		data := append([]byte{}, a.Data()...)
		a.data = mutateData(r, data, minLen, maxLen)
	case BufferString:
		if len(t.Values) != 0 {
			a.data = r.randString(s, t)
		} else {
			if t.TypeSize != 0 {
				minLen, maxLen = t.TypeSize, t.TypeSize
			}
			data := append([]byte{}, a.Data()...)
			a.data = mutateData(r, data, minLen, maxLen)
		}
	case BufferFilename:
		a.data = []byte(r.filename(s, t))
	case BufferGlob:
		if len(t.Values) != 0 {
			a.data = r.randString(s, t)
		} else {
			a.data = []byte(r.filename(s, t))
		}
	case BufferText:
		data := append([]byte{}, a.Data()...)
		a.data = r.mutateText(t.Text, data)
	case BufferCompressed:
		a.data, retry = r.mutateImage(a.Data())
	default:
		panic("unknown buffer kind")
	}
	return
}

// mutateImage decompresses a compressed buffer, performs a number of
// heatmap-guided integer overwrites, and recompresses it.
// Returns retry=true for empty images (nothing to mutate).
func (r *randGen) mutateImage(compressed []byte) (data []byte, retry bool) {
	data, dtor := image.MustDecompress(compressed)
	defer dtor()
	if len(data) == 0 {
		return compressed, true // Do not mutate empty data.
	}
	hm := MakeGenericHeatmap(data, r.Rand)
	for i := hm.NumMutations(); i > 0; i-- {
		index := hm.ChooseLocation()
		// Random power-of-two width 1/2/4/8, clamped to 1 near the end.
		width := 1 << uint(r.Intn(4))
		if index+width > len(data) {
			width = 1
		}
		storeInt(data[index:], r.randInt(uint64(width*8)), width)
	}
	return image.Compress(data), false
}

// mutateBufferSize adjusts an output buffer's size by a random delta in
// [-16, 16], clamped to [minLen, maxLen], retrying until it changes.
func mutateBufferSize(r *randGen, arg *DataArg, minLen, maxLen uint64) {
	for oldSize := arg.Size(); oldSize == arg.Size(); {
		arg.size += uint64(r.Intn(33)) - 16
		// Cast to int64 to prevent underflows.
		arg.size = uint64(max(int64(arg.size), int64(minLen)))
		arg.size = min(arg.size, maxLen)
	}
}

// mutate for ArrayType: occasionally shuffles elements, then resizes the
// array to a new random length (generating or removing elements).
func (t *ArrayType) mutate(r *randGen, s *state, arg Arg, ctx ArgCtx) (calls []*Call, retry, preserve bool) {
	a := arg.(*GroupArg)
	if len(a.Inner) > 1 && r.oneOf(5) {
		// Swap array elements.
		for r.nOutOf(2, 3) {
			i, j := r.Intn(len(a.Inner)), r.Intn(len(a.Inner))
			a.Inner[i], a.Inner[j] = a.Inner[j], a.Inner[i]
		}
	}
	count := uint64(0)
	switch t.Kind {
	case ArrayRandLen:
		if r.bin() {
			// Grow by a geometric amount.
			for count = uint64(len(a.Inner)); r.bin(); {
				count++
			}
		} else {
			// Re-roll until the length actually changes.
			for count == uint64(len(a.Inner)) {
				count = r.randArrayLen()
			}
		}
	case ArrayRangeLen:
		if t.RangeBegin == t.RangeEnd {
			panic("trying to mutate fixed length array")
		}
		for count == uint64(len(a.Inner)) {
			count = r.randRange(t.RangeBegin, t.RangeEnd)
		}
	}
	if count > uint64(len(a.Inner)) {
		for count > uint64(len(a.Inner)) {
			newArg, newCalls := r.generateArg(s, t.Elem, a.Dir())
			a.Inner = append(a.Inner, newArg)
			calls = append(calls, newCalls...)
			for _, c := range newCalls {
				s.analyze(c)
			}
		}
	} else if count < uint64(len(a.Inner)) {
		for _, arg := range a.Inner[count:] {
			removeArg(arg)
		}
		a.Inner = a.Inner[:count]
	}
	return
}

// mutate for PtrType: rarely (1/1000) replaces the pointer with a special
// pointer value; otherwise re-allocates the pointee at a new address.
func (t *PtrType) mutate(r *randGen, s *state, arg Arg, ctx ArgCtx) (calls []*Call, retry, preserve bool) {
	a := arg.(*PointerArg)
	// Do not generate special pointers for KFuzzTest calls, as they are
	// difficult to identify in the kernel and can lead to false positive
	// crash reports.
	if r.oneOf(1000) && !r.genKFuzzTest {
		removeArg(a.Res)
		index := r.rand(len(r.target.SpecialPointers))
		newArg := MakeSpecialPointerArg(t, a.Dir(), index)
		replaceArg(arg, newArg)
		return
	}
	newArg := r.allocAddr(s, t, a.Dir(), a.Res.Size(), a.Res)
	replaceArg(arg, newArg)
	return
}

// mutate for StructType: only structs with a registered special-type
// generator are mutable (see StructType.getMutationPrio); the generator
// produces a new value whose fields replace the existing ones.
func (t *StructType) mutate(r *randGen, s *state, arg Arg, ctx ArgCtx) (calls []*Call, retry, preserve bool) {
	gen := r.target.SpecialTypes[t.Name()]
	if gen == nil {
		panic("bad arg returned by mutationArgs: StructType")
	}
	var newArg Arg
	newArg, calls = gen(&Gen{r, s}, t, arg.Dir(), arg)
	a := arg.(*GroupArg)
	for i, f := range newArg.(*GroupArg).Inner {
		replaceArg(a.Inner[i], f)
	}
	return
}

// mutate for UnionType: special types use their registered generator;
// otherwise a different union option is chosen and generated.
func (t *UnionType) mutate(r *randGen, s *state, arg Arg, ctx ArgCtx) (calls []*Call, retry, preserve bool) {
	if gen := r.target.SpecialTypes[t.Name()]; gen != nil {
		var newArg Arg
		newArg, calls = gen(&Gen{r, s}, t, arg.Dir(), arg)
		replaceArg(arg, newArg)
		return
	}
	a := arg.(*UnionArg)
	// Pick a random field other than the current one.
	index := r.Intn(len(t.Fields) - 1)
	if index >= a.Index {
		index++
	}
	optType, optDir := t.Fields[index].Type, t.Fields[index].Dir(a.Dir())
	var newOpt Arg
	newOpt, calls = r.generateArg(s, optType, optDir)
	replaceArg(arg, MakeUnionArg(t, a.Dir(), newOpt, index))
	return
}

func (t *CsumType) mutate(r *randGen, s *state, arg Arg, ctx ArgCtx) (calls []*Call, retry, preserve bool) {
	panic("CsumType can't be mutated")
}

func (t *ConstType) mutate(r *randGen, s *state, arg Arg, ctx ArgCtx) (calls []*Call, retry, preserve bool) {
	panic("ConstType can't be mutated")
}

// mutationArgs collects the mutable arguments of a single call together
// with their cumulative mutation priorities (see collectArg/chooseArg).
type mutationArgs struct {
	target        *Target
	ignoreSpecial bool // Consumed by the first collectArg invocation only.
	ignoreLengths bool // Exclude LenType args (set for KFuzzTest calls).
	prioSum       float64
	args          []mutationArg
	argsBuffer    [16]mutationArg // Inline storage to avoid a heap allocation for small arg sets.
}

// mutationArg is one mutation candidate; priority holds the running
// prefix sum of priorities so chooseArg can binary-search over it.
type mutationArg struct {
	arg      Arg
	ctx      ArgCtx
	priority float64
}
// Mutation priority scale used by the getMutationPrio implementations below.
const (
	maxPriority = float64(10)
	minPriority = float64(1)
	dontMutate  = float64(0) // Excludes the argument from mutation entirely.
)

// collectArg is a ForeachArg callback that records each mutable argument
// together with the running prefix sum of priorities used by chooseArg.
func (ma *mutationArgs) collectArg(arg Arg, ctx *ArgCtx) {
	// ignoreSpecial applies only to the first (top-level) argument visited.
	ignoreSpecial := ma.ignoreSpecial
	ma.ignoreSpecial = false

	typ := arg.Type()
	prio, stopRecursion := typ.getMutationPrio(ma.target, arg, ignoreSpecial, ma.ignoreLengths)
	ctx.Stop = stopRecursion

	if prio == dontMutate {
		return
	}

	// Skip output args (buffers/arrays are kept: their size can still be
	// mutated, see BufferType.mutate) and zero-size fixed-size args.
	_, isArrayTyp := typ.(*ArrayType)
	_, isBufferTyp := typ.(*BufferType)
	if !isBufferTyp && !isArrayTyp && arg.Dir() == DirOut || !typ.Varlen() && typ.Size() == 0 {
		return
	}

	if len(ma.args) == 0 {
		// Start with the preallocated inline buffer.
		ma.args = ma.argsBuffer[:0]
	}
	ma.prioSum += prio
	ma.args = append(ma.args, mutationArg{arg, *ctx, ma.prioSum})
}

// chooseArg samples one collected argument with probability proportional
// to its priority, via binary search over the cumulative priorities.
func (ma *mutationArgs) chooseArg(r *rand.Rand) (Arg, ArgCtx) {
	goal := ma.prioSum * r.Float64()
	chosenIdx := sort.Search(len(ma.args), func(i int) bool { return ma.args[i].priority >= goal })
	arg := ma.args[chosenIdx]
	return arg.arg, arg.ctx
}

// TODO: find a way to estimate optimal priority values.
// Assign a priority for each type. The boolean is the reference type and it has
// the minimum priority, since it has only two possible values.
func (t *IntType) getMutationPrio(target *Target, arg Arg,
	ignoreSpecial, ignoreLengths bool) (prio float64, stopRecursion bool) {
	// For a integer without a range of values, the priority is based on
	// the number of bits occupied by the underlying type.
	plainPrio := math.Log2(float64(t.TypeBitSize())) + 0.1*maxPriority
	if t.Kind != IntRange {
		return plainPrio, false
	}

	size := t.RangeEnd - t.RangeBegin + 1
	if t.Align != 0 {
		if t.RangeBegin == 0 && int64(t.RangeEnd) == -1 {
			// Special [0:-1] range for all possible values.
			size = (1<<t.TypeBitSize()-1)/t.Align + 1
		} else {
			size = (t.RangeEnd-t.RangeBegin)/t.Align + 1
		}
	}
	switch {
	case size <= 15:
		// For a small range, we assume that it is effectively
		// similar with FlagsType and we need to try all possible values.
		prio = rangeSizePrio(size)
	case size <= 256:
		// We consider that a relevant range has at most 256
		// values (the number of values that can be represented on a byte).
		prio = maxPriority
	default:
		// Ranges larger than 256 are equivalent with a plain integer.
		prio = plainPrio
	}
	return prio, false
}

// Structs are only mutable via a registered special-type generator;
// stopRecursion prevents descending into fields handled by the generator.
func (t *StructType) getMutationPrio(target *Target, arg Arg,
	ignoreSpecial, ignoreLengths bool) (prio float64, stopRecursion bool) {
	if target.SpecialTypes[t.Name()] == nil || ignoreSpecial {
		return dontMutate, false
	}
	return maxPriority, true
}

func (t *UnionType) getMutationPrio(target *Target, arg Arg,
	ignoreSpecial, ignoreLengths bool) (prio float64, stopRecursion bool) {
	if target.SpecialTypes[t.Name()] == nil && len(t.Fields) == 1 || ignoreSpecial {
		return dontMutate, false
	}
	// For a non-special type union with more than one option
	// we mutate the union itself and also the value of the current option.
	if target.SpecialTypes[t.Name()] == nil {
		return maxPriority, false
	}
	return maxPriority, true
}

func (t *FlagsType) getMutationPrio(target *Target, arg Arg,
	ignoreSpecial, ignoreLengths bool) (prio float64, stopRecursion bool) {
	prio = rangeSizePrio(uint64(len(t.Vals)))
	if t.BitMask {
		// We want a higher priority because the mutation will include
		// more possible operations (bitwise operations).
		prio += 0.1 * maxPriority
	}
	return prio, false
}

// Assigns a priority based on the range size.
698 func rangeSizePrio(size uint64) (prio float64) { 699 switch size { 700 case 0: 701 prio = dontMutate 702 case 1: 703 prio = minPriority 704 default: 705 // Priority proportional with the number of values. After a threshold, the priority is constant. 706 // The threshold is 15 because most of the calls have <= 15 possible values for a flag. 707 prio = math.Min(float64(size)/3+0.4*maxPriority, 0.9*maxPriority) 708 } 709 return prio 710 } 711 712 func (t *PtrType) getMutationPrio(target *Target, arg Arg, 713 ignoreSpecial, ignoreLengths bool) (prio float64, stopRecursion bool) { 714 if arg.(*PointerArg).IsSpecial() { 715 // TODO: we ought to mutate this, but we don't have code for this yet. 716 return dontMutate, false 717 } 718 return 0.3 * maxPriority, false 719 } 720 721 func (t *ConstType) getMutationPrio(target *Target, arg Arg, 722 ignoreSpecial, ignoreLengths bool) (prio float64, stopRecursion bool) { 723 return dontMutate, false 724 } 725 726 func (t *CsumType) getMutationPrio(target *Target, arg Arg, 727 ignoreSpecial, ignoreLengths bool) (prio float64, stopRecursion bool) { 728 return dontMutate, false 729 } 730 731 func (t *ProcType) getMutationPrio(target *Target, arg Arg, 732 ignoreSpecial, ignoreLengths bool) (prio float64, stopRecursion bool) { 733 return 0.5 * maxPriority, false 734 } 735 736 func (t *ResourceType) getMutationPrio(target *Target, arg Arg, 737 ignoreSpecial, ignoreLengths bool) (prio float64, stopRecursion bool) { 738 return 0.5 * maxPriority, false 739 } 740 741 func (t *VmaType) getMutationPrio(target *Target, arg Arg, 742 ignoreSpecial, ignoreLengths bool) (prio float64, stopRecursion bool) { 743 return 0.5 * maxPriority, false 744 } 745 746 func (t *LenType) getMutationPrio(target *Target, arg Arg, 747 ignoreSpecial, ignoreLengths bool) (prio float64, stopRecursion bool) { 748 // Mutating LenType only produces "incorrect" results according to descriptions. 
749 if ignoreLengths { 750 return dontMutate, false 751 } 752 return 0.1 * maxPriority, false 753 } 754 755 func (t *BufferType) getMutationPrio(target *Target, arg Arg, 756 ignoreSpecial, ignoreLengths bool) (prio float64, stopRecursion bool) { 757 if arg.Dir() == DirOut && !t.Varlen() { 758 return dontMutate, false 759 } 760 if t.Kind == BufferString && len(t.Values) == 1 { 761 // These are effectively consts (and frequently file names). 762 return dontMutate, false 763 } 764 if t.Kind == BufferCompressed { 765 // Prioritise mutation of compressed buffers, e.g. disk images (`compressed_image`). 766 return maxPriority, false 767 } 768 return 0.8 * maxPriority, false 769 } 770 771 func (t *ArrayType) getMutationPrio(target *Target, arg Arg, 772 ignoreSpecial, ignoreLengths bool) (prio float64, stopRecursion bool) { 773 if t.Kind == ArrayRangeLen && t.RangeBegin == t.RangeEnd { 774 return dontMutate, false 775 } 776 return maxPriority, false 777 } 778 779 func mutateData(r *randGen, data []byte, minLen, maxLen uint64) []byte { 780 for stop := false; !stop; stop = stop && r.oneOf(3) { 781 f := mutateDataFuncs[r.Intn(len(mutateDataFuncs))] 782 data, stop = f(r, data, minLen, maxLen) 783 } 784 return data 785 } 786 787 // The maximum delta for integer mutations. 788 const maxDelta = 35 789 790 var mutateDataFuncs = [...]func(r *randGen, data []byte, minLen, maxLen uint64) ([]byte, bool){ 791 // TODO(dvyukov): duplicate part of data. 792 // Flip bit in byte. 793 func(r *randGen, data []byte, minLen, maxLen uint64) ([]byte, bool) { 794 if len(data) == 0 { 795 return data, false 796 } 797 byt := r.Intn(len(data)) 798 bit := r.Intn(8) 799 data[byt] ^= 1 << uint(bit) 800 return data, true 801 }, 802 // Insert random bytes. 
803 func(r *randGen, data []byte, minLen, maxLen uint64) ([]byte, bool) { 804 if len(data) == 0 || uint64(len(data)) >= maxLen { 805 return data, false 806 } 807 n := min(r.Intn(16)+1, int(maxLen)-len(data)) 808 pos := r.Intn(len(data)) 809 for i := 0; i < n; i++ { 810 data = append(data, 0) 811 } 812 copy(data[pos+n:], data[pos:]) 813 for i := 0; i < n; i++ { 814 data[pos+i] = byte(r.Int31()) 815 } 816 if uint64(len(data)) > maxLen || r.bin() { 817 data = data[:len(data)-n] // preserve original length 818 } 819 return data, true 820 }, 821 // Remove bytes. 822 func(r *randGen, data []byte, minLen, maxLen uint64) ([]byte, bool) { 823 if len(data) == 0 { 824 return data, false 825 } 826 n := min(r.Intn(16)+1, len(data)) 827 pos := 0 828 if n < len(data) { 829 pos = r.Intn(len(data) - n) 830 } 831 copy(data[pos:], data[pos+n:]) 832 data = data[:len(data)-n] 833 if uint64(len(data)) < minLen || r.bin() { 834 for i := 0; i < n; i++ { 835 data = append(data, 0) // preserve original length 836 } 837 } 838 return data, true 839 }, 840 // Append a bunch of bytes. 841 func(r *randGen, data []byte, minLen, maxLen uint64) ([]byte, bool) { 842 if uint64(len(data)) >= maxLen { 843 return data, false 844 } 845 const max = 256 846 n := min(max-r.biasedRand(max, 10), int(maxLen)-len(data)) 847 for i := 0; i < n; i++ { 848 data = append(data, byte(r.rand(256))) 849 } 850 return data, true 851 }, 852 // Replace int8/int16/int32/int64 with a random value. 853 func(r *randGen, data []byte, minLen, maxLen uint64) ([]byte, bool) { 854 width := 1 << uint(r.Intn(4)) 855 if len(data) < width { 856 return data, false 857 } 858 i := r.Intn(len(data) - width + 1) 859 storeInt(data[i:], r.Uint64(), width) 860 return data, true 861 }, 862 // Add/subtract from an int8/int16/int32/int64. 
863 func(r *randGen, data []byte, minLen, maxLen uint64) ([]byte, bool) { 864 width := 1 << uint(r.Intn(4)) 865 if len(data) < width { 866 return data, false 867 } 868 i := r.Intn(len(data) - width + 1) 869 v := loadInt(data[i:], width) 870 delta := r.rand(2*maxDelta+1) - maxDelta 871 if delta == 0 { 872 delta = 1 873 } 874 if r.oneOf(10) { 875 v = swapInt(v, width) 876 v += delta 877 v = swapInt(v, width) 878 } else { 879 v += delta 880 } 881 storeInt(data[i:], v, width) 882 return data, true 883 }, 884 // Set int8/int16/int32/int64 to an interesting value. 885 func(r *randGen, data []byte, minLen, maxLen uint64) ([]byte, bool) { 886 width := 1 << uint(r.Intn(4)) 887 if len(data) < width { 888 return data, false 889 } 890 i := r.Intn(len(data) - width + 1) 891 value := r.randInt64() 892 if r.oneOf(10) { 893 value = swap64(value) 894 } 895 storeInt(data[i:], value, width) 896 return data, true 897 }, 898 } 899 900 func swap16(v uint16) uint16 { 901 v0 := byte(v >> 0) 902 v1 := byte(v >> 8) 903 v = 0 904 v |= uint16(v1) << 0 905 v |= uint16(v0) << 8 906 return v 907 } 908 909 func swap32(v uint32) uint32 { 910 v0 := byte(v >> 0) 911 v1 := byte(v >> 8) 912 v2 := byte(v >> 16) 913 v3 := byte(v >> 24) 914 v = 0 915 v |= uint32(v3) << 0 916 v |= uint32(v2) << 8 917 v |= uint32(v1) << 16 918 v |= uint32(v0) << 24 919 return v 920 } 921 922 func swap64(v uint64) uint64 { 923 v0 := byte(v >> 0) 924 v1 := byte(v >> 8) 925 v2 := byte(v >> 16) 926 v3 := byte(v >> 24) 927 v4 := byte(v >> 32) 928 v5 := byte(v >> 40) 929 v6 := byte(v >> 48) 930 v7 := byte(v >> 56) 931 v = 0 932 v |= uint64(v7) << 0 933 v |= uint64(v6) << 8 934 v |= uint64(v5) << 16 935 v |= uint64(v4) << 24 936 v |= uint64(v3) << 32 937 v |= uint64(v2) << 40 938 v |= uint64(v1) << 48 939 v |= uint64(v0) << 56 940 return v 941 } 942 943 func swapInt(v uint64, size int) uint64 { 944 switch size { 945 case 1: 946 return v 947 case 2: 948 return uint64(swap16(uint16(v))) 949 case 4: 950 return 
uint64(swap32(uint32(v))) 951 case 8: 952 return swap64(v) 953 default: 954 panic(fmt.Sprintf("swapInt: bad size %v", size)) 955 } 956 } 957 958 func loadInt(data []byte, size int) uint64 { 959 switch size { 960 case 1: 961 return uint64(data[0]) 962 case 2: 963 return uint64(binary.LittleEndian.Uint16(data)) 964 case 4: 965 return uint64(binary.LittleEndian.Uint32(data)) 966 case 8: 967 return binary.LittleEndian.Uint64(data) 968 default: 969 panic(fmt.Sprintf("loadInt: bad size %v", size)) 970 } 971 } 972 973 func storeInt(data []byte, v uint64, size int) { 974 switch size { 975 case 1: 976 data[0] = uint8(v) 977 case 2: 978 binary.LittleEndian.PutUint16(data, uint16(v)) 979 case 4: 980 binary.LittleEndian.PutUint32(data, uint32(v)) 981 case 8: 982 binary.LittleEndian.PutUint64(data, v) 983 default: 984 panic(fmt.Sprintf("storeInt: bad size %v", size)) 985 } 986 }