gvisor.dev/gvisor@v0.0.0-20240520182842-f9d4d51c7e0f/pkg/bpf/optimizer.go

// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bpf

import (
	"fmt"
	"sort"
)

const (
	// maxConditionalJumpOffset is the maximum offset of a conditional
	// jump instruction. Conditional jump offsets are specified as an
	// unsigned 8-bit integer.
	maxConditionalJumpOffset = (1 << 8) - 1
	// maxUnconditionalJumpOffset is the maximum offset of an unconditional
	// jump instruction.
	// Unconditional jumps are stored in a uint32, but here we limit it to
	// what would fit in a uint16.
	// BPF programs (once uploaded into the kernel) are limited to
	// `BPF_MAXINSNS`, which is 4096 in Linux as of this writing.
	// We need a value larger than `BPF_MAXINSNS` here in order to support
	// optimizing programs that are initially larger than `BPF_MAXINSNS` but
	// that can be optimized to fit within that limit. However, programs that
	// jump 2^32-1 instructions are probably not optimizable enough to fit
	// regardless.
	// This number is a middle ground that should be plenty given the type of
	// program we expect to optimize, while also not trying too hard to
	// optimize unoptimizable programs.
	maxUnconditionalJumpOffset = (1 << 16) - 1
)

// optimizerFunc is a function type that can optimize a BPF program.
// It returns the updated set of instructions, along with whether any
// modification was made.
type optimizerFunc func(insns []Instruction) ([]Instruction, bool)

// optimizeConditionalJumps looks for conditional jumps which go to an
// unconditional jump that goes to a final target at most
// `maxConditionalJumpOffset` instructions away.
// These can safely be rewritten to not require the extra unconditional jump.
// It returns the optimized set of instructions, along with whether any change
// was made.
func optimizeConditionalJumps(insns []Instruction) ([]Instruction, bool) {
	changed := false
	for pc, ins := range insns {
		if !ins.IsConditionalJump() {
			continue // Not a conditional jump instruction.
		}
		// Take care of "true" target:
		{
			jumpTrueOffset := pc + int(ins.JumpIfTrue) + 1
			jumpTrueIns := insns[jumpTrueOffset]
			if jumpTrueIns.OpCode&instructionClassMask == Jmp && jumpTrueIns.OpCode&jmpMask == Ja {
				if finalJumpTrueOffset := int(ins.JumpIfTrue) + 1 + int(jumpTrueIns.K); finalJumpTrueOffset <= maxConditionalJumpOffset {
					// We can optimize the "true" target.
					ins.JumpIfTrue = uint8(finalJumpTrueOffset)
					changed = true
				}
			}
		}
		// Take care of "false" target:
		{
			jumpFalseOffset := pc + int(ins.JumpIfFalse) + 1
			jumpFalseIns := insns[jumpFalseOffset]
			if jumpFalseIns.OpCode&instructionClassMask == Jmp && jumpFalseIns.OpCode&jmpMask == Ja {
				if finalJumpFalseOffset := int(ins.JumpIfFalse) + 1 + int(jumpFalseIns.K); finalJumpFalseOffset <= maxConditionalJumpOffset {
					// We can optimize the "false" target.
					ins.JumpIfFalse = uint8(finalJumpFalseOffset)
					changed = true
				}
			}
		}
		insns[pc] = ins
	}
	return insns, changed
}
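
// The sketch below is illustrative only: it shows the shape of program that
// optimizeConditionalJumps rewrites. The constructors (Stmt, Jump) and the
// opcode constants are assumed to be the ones defined elsewhere in this
// package; the program itself is hypothetical.
//
//	insns := []Instruction{
//		Jump(Jmp|Jeq|K, 42, 0, 1), // pc=0: if A == 42 goto pc=1, else goto pc=2
//		Jump(Jmp|Ja, 1, 0, 0),     // pc=1: goto pc=3
//		Stmt(Ret|K, 0),            // pc=2: return 0
//		Stmt(Ret|A, 0),            // pc=3: return A
//	}
//
// After optimizeConditionalJumps, pc=0 becomes Jump(Jmp|Jeq|K, 42, 2, 1):
// its "true" branch jumps straight to pc=3, leaving the Ja at pc=1 as a
// candidate for dead-code removal.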

// optimizeSameTargetConditionalJumps looks for conditional jumps where both
// the "true" and "false" targets go to the same place, and rewrites them to
// an unconditional jump to that place.
// This can happen even in legitimate programs when the targets of indirect
// jumps resolve to the same place.
// It returns the optimized set of instructions, along with whether any change
// was made.
func optimizeSameTargetConditionalJumps(insns []Instruction) ([]Instruction, bool) {
	changed := false
	for pc, ins := range insns {
		if !ins.IsConditionalJump() {
			continue // Not a conditional jump instruction.
		}
		if ins.JumpIfTrue != ins.JumpIfFalse {
			continue // Not the same target.
		}
		insns[pc] = Jump(Jmp|Ja, uint32(ins.JumpIfTrue), 0, 0)
		changed = true
	}
	return insns, changed
}

// optimizeUnconditionalJumps looks for unconditional jumps which go to
// another unconditional jump.
func optimizeUnconditionalJumps(insns []Instruction) ([]Instruction, bool) {
	changed := false
	for pc, ins := range insns {
		if !ins.IsUnconditionalJump() {
			continue // Not an unconditional jump instruction.
		}
		jumpOffset := pc + int(ins.K) + 1
		jumpIns := insns[jumpOffset]
		if !jumpIns.IsUnconditionalJump() {
			// Not jumping to an unconditional jump.
			continue
		}
		finalJumpOffset := int(ins.K) + 1 + int(jumpIns.K)
		if finalJumpOffset > maxUnconditionalJumpOffset {
			// Final jump offset too large to fit in a single unconditional jump.
			continue
		}
		// We can optimize the final target.
		ins.K = uint32(finalJumpOffset)
		insns[pc] = ins
		changed = true
	}
	return insns, changed
}

// codeRemoval efficiently tracks indexes to remove from instructions.
type codeRemoval struct {
	insns    []Instruction
	toRemove []int
}

// MarkRemoved adds a new instruction index to be removed.
func (cr *codeRemoval) MarkRemoved(index int) {
	if cr.toRemove == nil {
		cr.toRemove = make([]int, 0, len(cr.insns))
	}
	cr.toRemove = append(cr.toRemove, index)
}

// Apply returns the set of instructions after removing marked indexes,
// along with a boolean representing whether any instruction was removed.
func (cr *codeRemoval) Apply() ([]Instruction, bool) {
	if len(cr.toRemove) == 0 {
		return cr.insns, false
	}
	sort.Ints(cr.toRemove)
	for i := len(cr.toRemove) - 1; i >= 0; i-- {
		pc := cr.toRemove[i]
		cr.insns = append(cr.insns[:pc], cr.insns[pc+1:]...)
		decrementJumps(cr.insns, pc)
	}
	return cr.insns, true
}
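
// A hypothetical usage sketch for codeRemoval. Indexes can be marked in any
// order; Apply sorts them and removes from the highest index down so that
// lower indexes stay valid, fixing up jump offsets after each removal via
// decrementJumps below:
//
//	removal := codeRemoval{insns: insns}
//	removal.MarkRemoved(5)
//	removal.MarkRemoved(2)
//	insns, removed := removal.Apply() // Removes pc=5 first, then pc=2.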

// decrementJumps decrements all jumps within `insns` that jump to an
// instruction with an index larger than `target`, where `target` is the
// index of an instruction that just got removed (i.e. `target` now points
// to the instruction that directly followed the removed instruction).
// Jumps that targeted `target` itself are not affected, i.e. they will
// point to the instruction that directly followed the removed instruction.
// `insns` is modified in-place.
func decrementJumps(insns []Instruction, target int) {
	for pc := 0; pc < target; pc++ {
		ins := insns[pc]
		if !ins.IsJump() {
			continue
		}
		if ins.IsUnconditionalJump() {
			// Unconditional jump, check K:
			if pc+int(ins.K)+1 > target {
				ins.K--
			}
		} else {
			// Conditional jump, check true target:
			if pc+int(ins.JumpIfTrue)+1 > target {
				ins.JumpIfTrue--
			}
			// ... And check false target:
			if pc+int(ins.JumpIfFalse)+1 > target {
				ins.JumpIfFalse--
			}
		}
		insns[pc] = ins
	}
}

// removeZeroInstructionJumps removes unconditional jumps that jump zero
// instructions forward. This may seem silly but it can happen due to other
// optimizations in this file which decrement jump target indexes.
func removeZeroInstructionJumps(insns []Instruction) ([]Instruction, bool) {
	removal := codeRemoval{insns: insns}
	for pc, ins := range insns {
		if !ins.IsUnconditionalJump() || ins.K != 0 {
			continue
		}
		removal.MarkRemoved(pc)
	}
	return removal.Apply()
}

// removeDeadCode removes instructions which are unreachable.
// This can happen due to the other optimizations in this file,
// e.g. optimizeConditionalJumps.
// In addition, removing dead code means the program is shorter,
// which in turn may make further jump optimizations possible.
func removeDeadCode(insns []Instruction) ([]Instruction, bool) {
	if len(insns) == 0 {
		return insns, false
	}

	// Keep track of which lines are reachable from all instructions in the program.
	reachable := make([]bool, len(insns))
	cursors := make([]int, 1, len(insns))
	cursors[0] = 0
	for len(cursors) > 0 {
		cursor := cursors[0]
		cursors = cursors[1:]
		if reachable[cursor] {
			continue
		}
		reachable[cursor] = true
		ins := insns[cursor]
		switch ins.OpCode & instructionClassMask {
		case Ret:
			// Return instructions are terminal, add no new cursor.
		case Jmp:
			// Add a new cursor wherever the jump can go.
			if ins.IsUnconditionalJump() {
				// Unconditional jump:
				cursors = append(cursors, cursor+int(ins.K)+1)
			} else {
				// Conditional jump:
				cursors = append(cursors, cursor+int(ins.JumpIfTrue)+1, cursor+int(ins.JumpIfFalse)+1)
			}
		default:
			// Other instructions simply flow forward.
			cursors = append(cursors, cursor+1)
		}
	}

	// Now remove unreachable code.
	removal := codeRemoval{insns: insns}
	for pc := range insns {
		if !reachable[pc] {
			removal.MarkRemoved(pc)
		}
	}
	return removal.Apply()
}
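
// A hypothetical before/after sketch for removeDeadCode (continuing the
// example program sketched near the top of this file). After
// optimizeConditionalJumps has retargeted pc=0, the unconditional jump at
// pc=1 is unreachable:
//
//	pc=0: Jump(Jmp|Jeq|K, 42, 2, 1) // goto pc=3 if true, pc=2 if false
//	pc=1: Jump(Jmp|Ja, 1, 0, 0)     // unreachable
//	pc=2: Stmt(Ret|K, 0)
//	pc=3: Stmt(Ret|A, 0)
//
// The breadth-first walk from pc=0 marks {0, 2, 3} reachable, pc=1 is
// removed, and the jump offsets at pc=0 are decremented to (1, 0) so that
// they keep pointing at the same instructions.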

// optimizeJumpsToReturn replaces unconditional jumps that go to return
// statements by a copy of that return statement.
func optimizeJumpsToReturn(insns []Instruction) ([]Instruction, bool) {
	changed := false
	for pc, ins := range insns {
		if !ins.IsUnconditionalJump() {
			continue // Not an unconditional jump instruction.
		}
		targetIns := insns[pc+int(ins.K)+1]
		if targetIns.OpCode&instructionClassMask != Ret {
			continue // Not jumping to a return instruction.
		}
		insns[pc] = targetIns
		changed = true
	}
	return insns, changed
}

// removeRedundantLoads removes load instructions that are redundant
// because register A already holds the value being loaded.
func removeRedundantLoads(insns []Instruction) ([]Instruction, bool) {
	// reverseWalk maps instruction indexes I to the set of instruction indexes
	// that, after their execution, may result in the control flow jumping to I.
	reverseWalk := make([]map[int]struct{}, len(insns))
	for pc := range insns {
		reverseWalk[pc] = make(map[int]struct{})
	}
	for pc, ins := range insns {
		if ins.IsReturn() {
			continue // Return instructions are terminal.
		}
		if ins.IsJump() {
			for _, offset := range ins.JumpOffsets() {
				reverseWalk[pc+int(offset.Offset)+1][pc] = struct{}{}
			}
			continue
		}
		// All other instructions flow through.
		reverseWalk[pc+1][pc] = struct{}{}
	}

	// Now look for redundant load instructions.
	removal := codeRemoval{insns: insns}
	for pc, ins := range insns {
		if ins.OpCode&instructionClassMask != Ld {
			continue
		}
		// Walk backwards until either we've reached the beginning of the program,
		// or we've reached an operation which modifies register A.
		lastModifiedA := -1
		beforePCs := reverseWalk[pc]
	walk:
		for {
			switch len(beforePCs) {
			case 0:
				// We've reached the beginning of the program without modifying A.
				break walk
			case 1:
				var beforePC int
				for bpc := range beforePCs { // Note: we know that this map only has one element.
					beforePC = bpc
				}
				if !insns[beforePC].ModifiesRegisterA() {
					beforePCs = reverseWalk[beforePC]
					continue walk
				}
				lastModifiedA = beforePC
				break walk
			default:
				// Multiple ways to get to `pc`.
				// For simplicity, we only support the single-branch case right now.
				break walk
			}
		}
		if lastModifiedA != -1 && insns[pc].Equal(insns[lastModifiedA]) {
			removal.MarkRemoved(pc)
		}
	}
	return removal.Apply()
}

// jumpRewriteOperation rewrites a jump target.
type jumpRewriteOperation struct {
	pc        int      // Rewrite instruction at this offset.
	jumpType  JumpType // Rewrite this type of jump.
	rewriteTo int      // Rewrite the jump offset to this value.
}
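
// A hypothetical sketch of what removeRedundantLoads prunes (the Abs/W load
// mode constants are assumed from elsewhere in this package). If the same
// absolute load appears twice with nothing in between that modifies
// register A, the second load is a no-op:
//
//	pc=0: Stmt(Ld|Abs|W, 4) // A = data[4]
//	pc=1: Stmt(Ld|Abs|W, 4) // Redundant: A is already data[4]; removed.
//
// The backwards walk only follows straight-line predecessors; if several
// branches can reach the load, it is conservatively kept.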

// rewriteAllJumpsToReturn rewrites *all* jump instructions that go to
// `fromPC` to go to `toPC` instead, if possible without converting jumps
// from conditional to unconditional. `fromPC` and `toPC` must point to
// identical return instructions.
// It is all-or-nothing: either all jump instructions must be rewritable
// (in which case they will all be rewritten, and this function will
// return true), or no jump instructions will be rewritten, and this
// function will return false.
// This function also returns false in the vacuous case (i.e. there are
// no jump instructions that go to `fromPC` in the first place).
// This function is used in `optimizeJumpsToSmallestSetOfReturns`.
// As a sanity check, it verifies that `fromPC` and `toPC` are functionally
// identical return instructions, and panics otherwise.
// `rewriteOps` is a buffer of jump rewrite operations meant to be
// efficiently reusable across calls to this function.
func rewriteAllJumpsToReturn(insns []Instruction, fromPC, toPC int, rewriteOps []jumpRewriteOperation) bool {
	fromIns, toIns := insns[fromPC], insns[toPC]
	if !fromIns.IsReturn() {
		panic(fmt.Sprintf("attempted to rewrite jumps from {pc=%d: %v} which is not a return instruction", fromPC, fromIns))
	}
	if !toIns.IsReturn() {
		panic(fmt.Sprintf("attempted to rewrite jumps to {pc=%d: %v} which is not a return instruction", toPC, toIns))
	}
	if !fromIns.Equal(toIns) {
		panic(fmt.Sprintf("attempted to rewrite jump target to a different return instruction: from={pc=%d: %v}, to={pc=%d: %v}", fromPC, fromIns, toPC, toIns))
	}
	// Scan once, and populate `rewriteOps` as a list of rewrite operations
	// that should be run if the rewrite is feasible.
	rewriteOps = rewriteOps[:0]
	for pc := 0; pc < fromPC; pc++ {
		ins := insns[pc]
		// Note: `neededOffset` may be negative, in the case where we are
		// rewriting the jump target to go to an earlier instruction, and we
		// are dealing with the instructions that come after that.
		// This isn't necessarily a dealbreaker; we just need to make sure
		// that `ins` is either not a jump statement, or a jump statement
		// that doesn't go to `fromPC` (only in that case would it need to
		// jump backwards).
		neededOffset := toPC - pc - 1
		if ins.IsConditionalJump() {
			if jumpTrueTarget := pc + int(ins.JumpIfTrue) + 1; jumpTrueTarget == fromPC {
				if neededOffset < 0 || neededOffset > maxConditionalJumpOffset {
					return false
				}
				rewriteOps = append(rewriteOps, jumpRewriteOperation{
					pc:        pc,
					jumpType:  JumpTrue,
					rewriteTo: neededOffset,
				})
			}
			if jumpFalseTarget := pc + int(ins.JumpIfFalse) + 1; jumpFalseTarget == fromPC {
				if neededOffset < 0 || neededOffset > maxConditionalJumpOffset {
					return false
				}
				rewriteOps = append(rewriteOps, jumpRewriteOperation{
					pc:        pc,
					jumpType:  JumpFalse,
					rewriteTo: neededOffset,
				})
			}
		} else if ins.IsUnconditionalJump() {
			if jumpTarget := pc + int(ins.K) + 1; jumpTarget == fromPC {
				if neededOffset < 0 || neededOffset > maxUnconditionalJumpOffset {
					return false
				}
				rewriteOps = append(rewriteOps, jumpRewriteOperation{
					pc:        pc,
					jumpType:  JumpDirect,
					rewriteTo: neededOffset,
				})
			}
		}
	}
	if len(rewriteOps) == 0 {
		return false // No jump statements to rewrite.
	}
	// Rewrite is feasible, so do it.
	for _, op := range rewriteOps {
		ins := insns[op.pc]
		switch op.jumpType {
		case JumpTrue:
			ins.JumpIfTrue = uint8(op.rewriteTo)
		case JumpFalse:
			ins.JumpIfFalse = uint8(op.rewriteTo)
		case JumpDirect:
			ins.K = uint32(op.rewriteTo)
		}
		insns[op.pc] = ins
	}
	return true
}
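
// A hypothetical sketch of the all-or-nothing contract above. Given two
// identical return instructions at pc=10 and pc=200:
//
//	ok := rewriteAllJumpsToReturn(insns, 10, 200, ops)
//
// If every jump currently targeting pc=10 can reach pc=200 within its
// encoding limits (at most maxConditionalJumpOffset for conditional jumps,
// at most maxUnconditionalJumpOffset for unconditional ones, and never
// backwards), they are all retargeted and ok is true; otherwise nothing is
// modified and ok is false, leaving pc=10 available for other rewrite
// attempts.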

// optimizeJumpsToSmallestSetOfReturns modifies jump targets that go to
// return statements to go to an identical return statement (which still
// fits within the maximum jump offsets), with the goal of minimizing the
// total number of such return statements needed within the program overall.
// The return statements that are skipped this way can then be removed by
// the `removeDeadCode` optimizer, which should come earlier in the
// optimizer list to ensure this optimizer only runs on instructions with
// no dead code in them.
// Within binary search trees, this allows deduplicating return statements
// across multiple conditions and makes them much shorter. In turn, this
// allows pruning these redundant return instructions as they become dead,
// and therefore makes the code shorter.
// (Essentially, we create a common "jump to return" doormat that everyone in
// Office Space^W^W^W^W any instruction in range can jump to.)
//
// Conceptually:
//
//	.. if (foo) goto A else goto B
//	A: return rejected
//	B: if (bar) goto C else goto D
//	C: return rejected
//	D: if (baz) goto E else goto F
//	E: return rejected
//	F: return accepted
//	...
//	(Another set of rules in the program):
//	.. if (foo2) goto G else goto H
//	G: return accepted
//	H: if (bar2) goto I else goto J
//	I: return accepted
//	J: return rejected
//
// becomes (after the dead code removal optimizer runs as well):
//
//	.. if (foo) goto J else goto B
//	B: if (bar) goto J else goto D
//	D: if (baz) goto J else goto I
//	...
//	.. if (foo2) goto I else goto H
//	H: if (bar2) goto I else goto J
//	I: return accepted
//	J: return rejected
func optimizeJumpsToSmallestSetOfReturns(insns []Instruction) ([]Instruction, bool) {
	// This is probably an NP-complete problem, so this approach does not
	// attempt to be optimal. Not being optimal is OK, we just end up with
	// a program that's slightly longer than necessary.
	// Rough sketch of the algorithm:
	// For each return instruction in the program:
	//   Count the number of jump instructions that flow to it ("popularity").
	//   Also add `len(insns)` to the count if the instruction just before
	//   the return instruction is neither a jump nor a return instruction,
	//   as the program can also flow through to it. This makes the return
	//   instruction non-removable, but that in turn means that it is a very
	//   good target for other jumps to jump to.
	// Build a map of lists of return instructions sorted by how many other
	// instructions flow to it, in ascending order.
	// The map key is the return value of the return instruction.
	// Iterate over this map (for each possible return value):
	//   Iterate over the list of return instructions that return this value:
	//     If the return instruction is unreachable, skip it.
	//     If the return instruction is reachable by fallthrough (i.e. the
	//     instruction just before it is not a jump nor a return), skip it.
	//     Otherwise, see if it's possible to move all jump targets of this
	//     instruction to any other return instruction in the list (starting
	//     from the end of the sorted list, i.e. the "most popular" return
	//     instruction that returns the same value), without needing to
	//     convert conditional jumps into unconditional ones.
	//     If it's possible, move all jump targets to it.
	// We may redundantly update multiple jump targets in one go which may be
	// optimized further in later passes (e.g. if unconditional jumps can be
	// removed and trim the program further, expanding the set of possible
	// rewrites beyond what we considered in this pass), but that's OK.
	// This pass will run again afterwards and eventually pick them up, and
	// this is still more efficient than running this (expensive) pass after
	// each single rewrite happens.
	changed := false
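
	// To make the popularity metric concrete, consider this hypothetical
	// fragment (a sketch only; offsets are illustrative):
	//
	//	pc=7: Jump(Jmp|Jeq|K, 1, 0, 1) // false branch goes to pc=9
	//	pc=8: Stmt(Ld|Abs|W, 0)        // falls through to pc=9
	//	pc=9: Stmt(Ret|K, 0)
	//
	// pc=9 gets popularity 1 (the jump from pc=7) plus len(insns) (the
	// fallthrough from pc=8), so it sorts as "most popular" and other
	// returns of 0 get retargeted to it rather than the reverse.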

	// retPopularity maps offsets (pc) of return instructions to the number of
	// jump targets that point to them, +numInstructions if the program can
	// also fall through to it.
	numInstructions := len(insns)
	retPopularity := make([]int, numInstructions)

	// retCanBeFallenThrough maps offsets (pc) of return instructions to
	// whether or not they can be fallen through (i.e. not jumped to).
	retCanBeFallenThrough := make([]bool, numInstructions)

	// retValueToPC maps return values to a set of instructions that return
	// that value.
	// In BPF, the value of the K register is part of the return instruction
	// itself ("immediate" in assembly parlance), whereas the A register is
	// more of a regular register (previous operations may store/load/modify
	// it). So any return statement that returns the value of the A register
	// is functionally identical to any other, but any return statement that
	// returns the value of the K register must have the same value of K in
	// the return instruction for it to be functionally equivalent.
	// So, for return instructions that return K, we use the immediate value
	// of the K register (which is a uint32), and for return instructions
	// that return the A register, we use the stand-in value
	// "0xaaaaaaaaaaaaaaaa" (which doesn't fit in uint32, so it can't conflict
	// with an immediate value of K).
	const retRegisterA = 0xaaaaaaaaaaaaaaaa
	retValueToPC := make(map[uint64][]int)

	for pc, ins := range insns {
		if !ins.IsReturn() {
			continue // Not a return instruction.
		}
		var retValue uint64
		switch ins.OpCode - Ret {
		case A:
			retValue = retRegisterA
		case K:
			retValue = uint64(ins.K)
		default:
			panic(fmt.Sprintf("unknown return value in instruction at pc=%d: %v", pc, ins))
		}
		popularity := 0
		canBeFallenThrough := false
		for pc2 := 0; pc2 < pc; pc2++ {
			ins2 := insns[pc2]
			switch ins2.OpCode & instructionClassMask {
			case Ret:
				// Do nothing.
			case Jmp:
				if ins2.IsConditionalJump() {
					// Note that optimizeSameTargetConditionalJumps should make
					// it impossible for a conditional jump to have identical
					// "true" and "false" targets, so this should not result in
					// adding 2 to `popularity`.
					if jumpTrueTarget := pc2 + int(ins2.JumpIfTrue) + 1; jumpTrueTarget == pc {
						popularity++
					}
					if jumpFalseTarget := pc2 + int(ins2.JumpIfFalse) + 1; jumpFalseTarget == pc {
						popularity++
					}
				} else {
					if jumpTarget := pc2 + int(ins2.K) + 1; jumpTarget == pc {
						popularity++
					}
				}
			default:
				if pc2 == pc-1 {
					// This return instruction can be fallen through to.
					popularity += numInstructions
					canBeFallenThrough = true
				}
			}
		}
		retValueToPC[retValue] = append(retValueToPC[retValue], pc)
		retPopularity[pc] = popularity
		retCanBeFallenThrough[pc] = canBeFallenThrough
	}
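
	// Phase two: walk each bucket of identical return instructions from
	// least to most popular, and try to fold the unpopular ones into the
	// popular ones via rewriteAllJumpsToReturn.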
	rewriteOps := make([]jumpRewriteOperation, 0, len(insns))
	for _, pcs := range retValueToPC {
		sort.Slice(pcs, func(i, j int) bool {
			// Sort `pcs` in order of ascending popularity.
			// If the popularity is the same, sort by PC.
			if retPopularity[pcs[i]] != retPopularity[pcs[j]] {
				return retPopularity[pcs[i]] < retPopularity[pcs[j]]
			}
			return pcs[i] < pcs[j]
		})
		for i, unpopularPC := range pcs {
			if retCanBeFallenThrough[unpopularPC] {
				// Can't remove this return instruction, so no need to try
				// to check if we can rewrite other instructions that jump
				// to it.
				continue
			}
			for j := len(pcs) - 1; j > i; j-- {
				popularPC := pcs[j]
				// Check if we can rewrite all instructions that jump to
				// `unpopularPC` to instead jump to `popularPC`.
				if rewriteAllJumpsToReturn(insns, unpopularPC, popularPC, rewriteOps) {
					changed = true
				}
			}
		}
	}
	return insns, changed
}

// optimize losslessly optimizes a BPF program using the given optimizer
// functions.
// Optimizers should be ranked in order of importance, with the most
// important first.
// An optimizer will be exhausted before the next one is ever run.
// Earlier optimizers are re-exhausted if later optimizers cause change.
// The BPF instructions are assumed to have been checked for validity and
// consistency.
// The instructions in `insns` may be modified in-place.
func optimize(insns []Instruction, funcs []optimizerFunc) []Instruction {
	for changed := true; changed; {
		for _, fn := range funcs {
			if insns, changed = fn(insns); changed {
				break
			}
		}
	}
	return insns
}

// Optimize losslessly optimizes a BPF program.
// The BPF instructions are assumed to have been checked for validity and
// consistency.
// The instructions in `insns` may be modified in-place.
func Optimize(insns []Instruction) []Instruction {
	return optimize(insns, []optimizerFunc{
		optimizeConditionalJumps,
		optimizeSameTargetConditionalJumps,
		optimizeUnconditionalJumps,
		optimizeJumpsToReturn,
		removeZeroInstructionJumps,
		removeDeadCode,
		removeRedundantLoads,
		optimizeJumpsToSmallestSetOfReturns,
	})
}
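
// exampleOptimize is a hypothetical usage sketch (not part of the public
// API): it builds a tiny program by hand and runs Optimize on it. The
// constructors and opcode constants are assumed from this package.
func exampleOptimize() []Instruction {
	insns := []Instruction{
		Jump(Jmp|Jeq|K, 42, 0, 1), // if A == 42, goto next; else skip one.
		Jump(Jmp|Ja, 1, 0, 0),     // Chained jump; Optimize should collapse it.
		Stmt(Ret|K, 0),            // return 0
		Stmt(Ret|A, 0),            // return A
	}
	// After optimization this collapses to three instructions: the
	// conditional jump branches directly to the two returns.
	return Optimize(insns)
}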