github.com/google/syzkaller@v0.0.0-20240517125934-c0f1611a36d6/pkg/fuzzer/job.go

// Copyright 2024 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

package fuzzer

import (
	"fmt"
	"math/rand"

	"github.com/google/syzkaller/pkg/corpus"
	"github.com/google/syzkaller/pkg/cover"
	"github.com/google/syzkaller/pkg/flatrpc"
	"github.com/google/syzkaller/pkg/fuzzer/queue"
	"github.com/google/syzkaller/pkg/ipc"
	"github.com/google/syzkaller/pkg/signal"
	"github.com/google/syzkaller/pkg/stats"
	"github.com/google/syzkaller/prog"
)

type job interface {
	run(fuzzer *Fuzzer)
}

type ProgTypes int

const (
	progCandidate ProgTypes = 1 << iota
	progMinimized
	progSmashed
	progInTriage
)

func genProgRequest(fuzzer *Fuzzer, rnd *rand.Rand) *queue.Request {
	p := fuzzer.target.Generate(rnd,
		prog.RecommendedCalls,
		fuzzer.ChoiceTable())
	return &queue.Request{
		Prog:     p,
		ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal),
		Stat:     fuzzer.statExecGenerate,
	}
}

func mutateProgRequest(fuzzer *Fuzzer, rnd *rand.Rand) *queue.Request {
	p := fuzzer.Config.Corpus.ChooseProgram(rnd)
	if p == nil {
		return nil
	}
	newP := p.Clone()
	newP.Mutate(rnd,
		prog.RecommendedCalls,
		fuzzer.ChoiceTable(),
		fuzzer.Config.NoMutateCalls,
		fuzzer.Config.Corpus.Programs(),
	)
	return &queue.Request{
		Prog:     newP,
		ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal),
		Stat:     fuzzer.statExecFuzz,
	}
}

func candidateRequest(fuzzer *Fuzzer, input Candidate) (*queue.Request, ProgTypes) {
	flags := progCandidate
	if input.Minimized {
		flags |= progMinimized
	}
	if input.Smashed {
		flags |= progSmashed
	}
	return &queue.Request{
		Prog:      input.Prog,
		ExecOpts:  setFlags(flatrpc.ExecFlagCollectSignal),
		Stat:      fuzzer.statExecCandidate,
		Important: true,
	}, flags
}

// triageJob is created for a program for which we noticed potential new coverage
// during its first execution, but we are not yet sure whether that coverage is real.
// During triage we determine whether the program in fact gives new coverage,
// and if so, we minimize it and add it to the corpus.
type triageJob struct {
	p         *prog.Prog
	call      int
	info      ipc.CallInfo
	newSignal signal.Signal
	flags     ProgTypes
	fuzzer    *Fuzzer
	queue     queue.Executor
}

func (job *triageJob) execute(req *queue.Request, flags ProgTypes) *queue.Result {
	req.Important = true // All triage executions are important.
	return job.fuzzer.executeWithFlags(job.queue, req, flags)
}

func (job *triageJob) run(fuzzer *Fuzzer) {
	fuzzer.statNewInputs.Add(1)
	job.fuzzer = fuzzer

	callName := fmt.Sprintf("call #%v %v", job.call, job.p.CallName(job.call))
	fuzzer.Logf(3, "triaging input for %v (new signal=%v)", callName, job.newSignal.Len())

	// Compute input coverage and non-flaky signal for minimization.
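	// deflake() re-executes the program several times and keeps only the signal
	// that is common to several runs (see the comment on deflake for the exact
	// numbers), so that flaky coverage neither drives minimization nor ends up
	// in the corpus.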
	info, stop := job.deflake(job.execute, fuzzer.statExecTriage, fuzzer.Config.FetchRawCover)
	if stop || info.newStableSignal.Empty() {
		return
	}
	if job.flags&progMinimized == 0 {
		stop = job.minimize(info.newStableSignal)
		if stop {
			return
		}
	}
	if !fuzzer.Config.NewInputFilter(job.p.CallName(job.call)) {
		return
	}
	fuzzer.Logf(2, "added new input for %v to the corpus: %s", callName, job.p)
	if job.flags&progSmashed == 0 {
		fuzzer.startJob(fuzzer.statJobsSmash, &smashJob{
			p:    job.p.Clone(),
			call: job.call,
		})
	}
	input := corpus.NewInput{
		Prog:     job.p,
		Call:     job.call,
		Signal:   info.stableSignal,
		Cover:    info.cover.Serialize(),
		RawCover: info.rawCover,
	}
	fuzzer.Config.Corpus.Save(input)
}

type deflakedCover struct {
	stableSignal    signal.Signal
	newStableSignal signal.Signal
	cover           cover.Cover
	rawCover        []uint32
}

func (job *triageJob) deflake(exec func(*queue.Request, ProgTypes) *queue.Result, stat *stats.Val,
	rawCover bool) (info deflakedCover, stop bool) {
	// As demonstrated in #4639, programs reproduce with a very high, but not 100%, probability.
	// The triage algorithm must tolerate this, so let's pick the signal that is common
	// to 3 out of 5 runs.
	// By the binomial distribution, a program that reproduces 80% of the time will pass deflake()
	// with 94% probability. If it reproduces 90% of the time, it passes in 99% of cases.
	const (
		needRuns = 3
		maxRuns  = 5
	)
	signals := make([]signal.Signal, needRuns)
	for i := 0; i < maxRuns; i++ {
		if job.newSignal.IntersectsWith(signals[needRuns-1]) {
			// We already have the right deflaked signal.
			break
		}
		if left := maxRuns - i; left < needRuns &&
			!job.newSignal.IntersectsWith(signals[needRuns-left-1]) {
			// There's no chance to get coverage common to needRuns runs.
			break
		}
		result := exec(&queue.Request{
			Prog:            job.p,
			ExecOpts:        setFlags(flatrpc.ExecFlagCollectCover | flatrpc.ExecFlagCollectSignal),
			ReturnAllSignal: true,
			Stat:            stat,
		}, progInTriage)
		if result.Stop() {
			stop = true
			return
		}
		if !reexecutionSuccess(result.Info, &job.info, job.call) {
			// The call was not executed or failed.
			continue
		}
		thisSignal, thisCover := getSignalAndCover(job.p, result.Info, job.call)
		if len(info.rawCover) == 0 && rawCover {
			info.rawCover = thisCover
		}
		info.cover.Merge(thisCover)
		for j := len(signals) - 1; j > 0; j-- {
			intersect := signals[j-1].Intersection(thisSignal)
			signals[j].Merge(intersect)
		}
		signals[0].Merge(thisSignal)
	}

	info.stableSignal = signals[needRuns-1]
	info.newStableSignal = job.newSignal.Intersection(info.stableSignal)
	return
}

func (job *triageJob) minimize(newSignal signal.Signal) (stop bool) {
	const minimizeAttempts = 3
	job.p, job.call = prog.Minimize(job.p, job.call, false,
		func(p1 *prog.Prog, call1 int) bool {
			if stop {
				return false
			}
			for i := 0; i < minimizeAttempts; i++ {
				result := job.execute(&queue.Request{
					Prog:             p1,
					ExecOpts:         setFlags(flatrpc.ExecFlagCollectSignal),
					SignalFilter:     newSignal,
					SignalFilterCall: call1,
					Stat:             job.fuzzer.statExecMinimize,
				}, 0)
				if result.Stop() {
					stop = true
					return false
				}
				info := result.Info
				if !reexecutionSuccess(info, &job.info, call1) {
					// The call was not executed or failed.
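					// Give the simplified program another of the minimizeAttempts tries;
					// a single failed re-execution may just be flakiness, not proof that
					// the simplification lost the signal.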
					continue
				}
				thisSignal, _ := getSignalAndCover(p1, info, call1)
				if newSignal.Intersection(thisSignal).Len() == newSignal.Len() {
					return true
				}
			}
			return false
		})
	return stop
}

func reexecutionSuccess(info *ipc.ProgInfo, oldInfo *ipc.CallInfo, call int) bool {
	if info == nil || len(info.Calls) == 0 {
		return false
	}
	if call != -1 {
		// Don't minimize calls from successful to unsuccessful.
		// Successful calls are much more valuable.
		if oldInfo.Errno == 0 && info.Calls[call].Errno != 0 {
			return false
		}
		return len(info.Calls[call].Signal) != 0
	}
	return len(info.Extra.Signal) != 0
}

func getSignalAndCover(p *prog.Prog, info *ipc.ProgInfo, call int) (signal.Signal, []uint32) {
	inf := &info.Extra
	if call != -1 {
		inf = &info.Calls[call]
	}
	return signal.FromRaw(inf.Signal, signalPrio(p, inf, call)), inf.Cover
}

type smashJob struct {
	p    *prog.Prog
	call int
}

func (job *smashJob) run(fuzzer *Fuzzer) {
	fuzzer.Logf(2, "smashing the program %s (call=%d):", job.p, job.call)
	if fuzzer.Config.Comparisons && job.call >= 0 {
		fuzzer.startJob(fuzzer.statJobsHints, &hintsJob{
			p:    job.p.Clone(),
			call: job.call,
		})
	}

	const iters = 75
	rnd := fuzzer.rand()
	for i := 0; i < iters; i++ {
		p := job.p.Clone()
		p.Mutate(rnd, prog.RecommendedCalls,
			fuzzer.ChoiceTable(),
			fuzzer.Config.NoMutateCalls,
			fuzzer.Config.Corpus.Programs())
		result := fuzzer.execute(fuzzer.smashQueue, &queue.Request{
			Prog:     p,
			ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal),
			Stat:     fuzzer.statExecSmash,
		})
		if result.Stop() {
			return
		}
		if fuzzer.Config.Collide {
			result := fuzzer.execute(fuzzer.smashQueue, &queue.Request{
				Prog: randomCollide(p, rnd),
				Stat: fuzzer.statExecCollide,
			})
			if result.Stop() {
				return
			}
		}
	}
	if fuzzer.Config.FaultInjection && job.call >= 0 {
		job.faultInjection(fuzzer)
	}
}

func randomCollide(origP *prog.Prog, rnd *rand.Rand) *prog.Prog {
	if rnd.Intn(5) == 0 {
		// Old-style collide with a 20% probability.
		p, err := prog.DoubleExecCollide(origP, rnd)
		if err == nil {
			return p
		}
	}
	if rnd.Intn(4) == 0 {
		// Duplicate random calls with a 20% probability (25% * 80%).
		p, err := prog.DupCallCollide(origP, rnd)
		if err == nil {
			return p
		}
	}
	p := prog.AssignRandomAsync(origP, rnd)
	if rnd.Intn(2) != 0 {
		prog.AssignRandomRerun(p, rnd)
	}
	return p
}

func (job *smashJob) faultInjection(fuzzer *Fuzzer) {
	for nth := 1; nth <= 100; nth++ {
		fuzzer.Logf(2, "injecting fault into call %v, step %v",
			job.call, nth)
		newProg := job.p.Clone()
		newProg.Calls[job.call].Props.FailNth = nth
		result := fuzzer.execute(fuzzer.smashQueue, &queue.Request{
			Prog: newProg,
			Stat: fuzzer.statExecSmash,
		})
		if result.Stop() {
			return
		}
		info := result.Info
		if info != nil && len(info.Calls) > job.call &&
			info.Calls[job.call].Flags&ipc.CallFaultInjected == 0 {
			break
		}
	}
}

type hintsJob struct {
	p    *prog.Prog
	call int
}

func (job *hintsJob) run(fuzzer *Fuzzer) {
	// First execute the original program twice to get comparisons from KCOV.
	// The second execution lets us filter out flaky values, which seem to constitute ~30-40%.
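	// Only comparisons observed in both runs survive the InplaceIntersect call
	// below, so hints are derived from stable operand values only.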
	p := job.p

	var comps prog.CompMap
	for i := 0; i < 2; i++ {
		result := fuzzer.execute(fuzzer.smashQueue, &queue.Request{
			Prog:     p,
			ExecOpts: setFlags(flatrpc.ExecFlagCollectComps),
			Stat:     fuzzer.statExecSeed,
		})
		if result.Stop() || result.Info == nil {
			return
		}
		if i == 0 {
			comps = result.Info.Calls[job.call].Comps
			if len(comps) == 0 {
				return
			}
		} else {
			comps.InplaceIntersect(result.Info.Calls[job.call].Comps)
		}
	}

	// Then mutate the initial program for every match between
	// a syscall argument and a comparison operand.
	// Execute each such mutant to check whether it gives new coverage.
	p.MutateWithHints(job.call, comps,
		func(p *prog.Prog) bool {
			result := fuzzer.execute(fuzzer.smashQueue, &queue.Request{
				Prog:     p,
				ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal),
				Stat:     fuzzer.statExecHint,
			})
			return !result.Stop()
		})
}
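
// Overall flow of the jobs in this file: programs that produce potentially new
// signal are wrapped into a triageJob, which deflakes the signal, minimizes the
// program and saves it to the corpus. Inputs that make it into the corpus also
// spawn a smashJob (further mutations, plus collide and fault-injection variants
// when enabled), which in turn schedules a hintsJob when comparison tracing is
// enabled.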