github.com/nicocha30/gvisor-ligolo@v0.0.0-20230726075806-989fa2c0a413/pkg/seccomp/seccomp.go

     1  // Copyright 2018 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  // Package seccomp provides generation of basic seccomp filters. Currently,
    16  // only little endian systems are supported.
    17  package seccomp
    18  
    19  import (
    20  	"fmt"
    21  	"reflect"
    22  	"sort"
    23  
    24  	"github.com/nicocha30/gvisor-ligolo/pkg/abi/linux"
    25  	"github.com/nicocha30/gvisor-ligolo/pkg/bpf"
    26  	"github.com/nicocha30/gvisor-ligolo/pkg/log"
    27  )
    28  
    29  const (
    30  	// skipOneInst is the offset to take for skipping one instruction.
    31  	skipOneInst = 1
    32  
    33  	// defaultLabel is the label for the default action.
    34  	defaultLabel = "default_action"
    35  )
    36  
    37  // NonNegativeFDCheck ensures an FD argument is a non-negative int.
    38  func NonNegativeFDCheck() LessThanOrEqual {
     39  	// A negative int32 has the sign bit (bit 31) set, so the raw FD value,
     40  	// viewed as an unsigned integer, must be less than or equal to 0x7fffffff.
    41  	return LessThanOrEqual(0x7fffffff)
    42  }
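
         // A usage sketch (assuming Rule is this package's per-argument matcher
         // list, where index 0 is the first syscall argument, and SyscallRules
         // maps syscall numbers to []Rule; the syscall number is illustrative):
         //
         //	rules := SyscallRules{
         //		uintptr(3): { // e.g. close(2) on amd64
         //			{NonNegativeFDCheck()},
         //		},
         //	}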
    43  
    44  // Install generates BPF code based on the set of syscalls provided. It only
    45  // allows syscalls that conform to the specification. Syscalls that violate the
    46  // specification will trigger RET_KILL_PROCESS. If RET_KILL_PROCESS is not
    47  // supported, violations will trigger RET_TRAP instead. RET_KILL_THREAD is not
    48  // used because it only kills the offending thread and often keeps the sentry
    49  // hanging.
    50  //
    51  // denyRules describes forbidden syscalls. rules describes allowed syscalls.
    52  // denyRules is executed before rules.
    53  //
     54  // Be aware that RET_TRAP sends SIGSYS to the process, and that signal may be
     55  // ignored, allowing the process to continue running after a violation.
     56  // However, it still leaves a SECCOMP audit event behind, and in any case the
     57  // offending syscall is blocked from executing.
    58  func Install(rules SyscallRules, denyRules SyscallRules) error {
    59  	defaultAction, err := defaultAction()
    60  	if err != nil {
    61  		return err
    62  	}
    63  
    64  	// ***   DEBUG TIP   ***
    65  	// If you suspect the process is getting killed due to a seccomp violation, uncomment the line
    66  	// below to get a panic stack trace when there is a violation.
    67  	// defaultAction = linux.BPFAction(linux.SECCOMP_RET_TRAP)
    68  
    69  	log.Infof("Installing seccomp filters for %d syscalls (action=%v)", len(rules), defaultAction)
    70  
    71  	instrs, err := BuildProgram([]RuleSet{
    72  		{
    73  			Rules:  denyRules,
    74  			Action: defaultAction,
    75  		},
    76  		{
    77  			Rules:  rules,
    78  			Action: linux.SECCOMP_RET_ALLOW,
    79  		},
    80  	}, defaultAction, defaultAction)
    81  	if log.IsLogging(log.Debug) {
    82  		programStr, errDecode := bpf.DecodeInstructions(instrs)
    83  		if errDecode != nil {
    84  			programStr = fmt.Sprintf("Error: %v\n%s", errDecode, programStr)
    85  		}
    86  		log.Debugf("Seccomp program dump:\n%s", programStr)
    87  	}
    88  	if err != nil {
    89  		return err
    90  	}
    91  
    92  	// Perform the actual installation.
    93  	if err := SetFilter(instrs); err != nil {
    94  		return fmt.Errorf("failed to set filter: %v", err)
    95  	}
    96  
    97  	log.Infof("Seccomp filters installed.")
    98  	return nil
    99  }
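
         // A minimal sketch of invoking Install, assuming SyscallRules maps
         // syscall numbers to argument rules ([]Rule) as used above; the syscall
         // numbers are amd64 values chosen purely for illustration:
         //
         //	allow := SyscallRules{
         //		uintptr(0): {},             // read(2): any arguments.
         //		uintptr(1): {{EqualTo(1)}}, // write(2): only to fd 1.
         //	}
         //	deny := SyscallRules{}
         //	if err := Install(allow, deny); err != nil {
         //		panic(err) // The filter could not be built or installed.
         //	}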
   100  
   101  func defaultAction() (linux.BPFAction, error) {
   102  	available, err := isKillProcessAvailable()
   103  	if err != nil {
   104  		return 0, err
   105  	}
   106  	if available {
   107  		return linux.SECCOMP_RET_KILL_PROCESS, nil
   108  	}
   109  	return linux.SECCOMP_RET_TRAP, nil
   110  }
   111  
   112  // RuleSet is a set of rules and associated action.
   113  type RuleSet struct {
   114  	Rules  SyscallRules
   115  	Action linux.BPFAction
   116  
    117  	// Vsyscall indicates that a check is made for the syscall being
    118  	// dispatched from kernel mappings, which is where the vsyscall page is
    119  	// located (and typically emulated). This RuleSet will therefore only
    120  	// match syscalls dispatched from the vsyscall page.
   121  	Vsyscall bool
   122  }
   123  
   124  // SyscallName gives names to system calls. It is used purely for debugging purposes.
   125  //
   126  // An alternate namer can be provided to the package at initialization time.
   127  var SyscallName = func(sysno uintptr) string {
   128  	return fmt.Sprintf("syscall_%d", sysno)
   129  }
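
         // For example, a caller that knows its syscall table could install a more
         // descriptive namer before building filters (a sketch; the table below is
         // hypothetical and only for illustration):
         //
         //	func init() {
         //		names := map[uintptr]string{0: "read", 1: "write", 3: "close"}
         //		SyscallName = func(sysno uintptr) string {
         //			if name, ok := names[sysno]; ok {
         //				return name
         //			}
         //			return fmt.Sprintf("syscall_%d", sysno)
         //		}
         //	}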
   130  
    131  // BuildProgram builds a BPF program from the given RuleSets, each pairing
    132  // SyscallRules with an action. The single generated program covers them all.
   133  func BuildProgram(rules []RuleSet, defaultAction, badArchAction linux.BPFAction) ([]linux.BPFInstruction, error) {
   134  	program := bpf.NewProgramBuilder()
   135  
    136  	// Be paranoid and check that the syscall was made for the expected architecture.
   137  	//
   138  	// A = seccomp_data.arch
   139  	// if (A != AUDIT_ARCH) goto defaultAction.
   140  	program.AddStmt(bpf.Ld|bpf.Abs|bpf.W, seccompDataOffsetArch)
    141  	// defaultLabel is at the bottom of the program, and the program may
    142  	// exceed 255 instructions, the limit of a conditional jump's offset.
   143  	program.AddJump(bpf.Jmp|bpf.Jeq|bpf.K, LINUX_AUDIT_ARCH, skipOneInst, 0)
   144  	program.AddStmt(bpf.Ret|bpf.K, uint32(badArchAction))
   145  	if err := buildIndex(rules, program); err != nil {
   146  		return nil, err
   147  	}
   148  
   149  	// Exhausted: return defaultAction.
   150  	if err := program.AddLabel(defaultLabel); err != nil {
   151  		return nil, err
   152  	}
   153  	program.AddStmt(bpf.Ret|bpf.K, uint32(defaultAction))
   154  
   155  	return program.Instructions()
   156  }
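
         // A hedged sketch of calling BuildProgram directly, mirroring how Install
         // composes rule sets above (the SyscallRules contents are illustrative):
         //
         //	sets := []RuleSet{
         //		{Rules: SyscallRules{uintptr(1): {}}, Action: linux.SECCOMP_RET_ALLOW},
         //	}
         //	instrs, err := BuildProgram(sets, linux.SECCOMP_RET_TRAP, linux.SECCOMP_RET_TRAP)
         //	if err != nil {
         //		panic(err)
         //	}
         //	if dump, err := bpf.DecodeInstructions(instrs); err == nil {
         //		log.Debugf("program:\n%s", dump)
         //	}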
   157  
   158  // buildIndex builds a BST to quickly search through all syscalls.
   159  func buildIndex(rules []RuleSet, program *bpf.ProgramBuilder) error {
   160  	// Do nothing if rules is empty.
   161  	if len(rules) == 0 {
   162  		return nil
   163  	}
   164  
   165  	// Build a list of all application system calls, across all given rule
   166  	// sets. We have a simple BST, but may dispatch individual matchers
   167  	// with different actions. The matchers are evaluated linearly.
   168  	requiredSyscalls := make(map[uintptr]struct{})
   169  	for _, rs := range rules {
   170  		for sysno := range rs.Rules {
   171  			requiredSyscalls[sysno] = struct{}{}
   172  		}
   173  	}
   174  	syscalls := make([]uintptr, 0, len(requiredSyscalls))
   175  	for sysno := range requiredSyscalls {
   176  		syscalls = append(syscalls, sysno)
   177  	}
   178  	sort.Slice(syscalls, func(i, j int) bool { return syscalls[i] < syscalls[j] })
   179  	for _, sysno := range syscalls {
   180  		for _, rs := range rules {
    181  			// Log only if there is a corresponding set of rules.
   182  			if _, ok := rs.Rules[sysno]; ok {
   183  				log.Debugf("syscall filter %v: %s => 0x%x", SyscallName(sysno), rs.Rules[sysno], rs.Action)
   184  			}
   185  		}
   186  	}
   187  
   188  	root := createBST(syscalls)
   189  	root.root = true
   190  
   191  	// Load syscall number into A and run through BST.
   192  	//
   193  	// A = seccomp_data.nr
   194  	program.AddStmt(bpf.Ld|bpf.Abs|bpf.W, seccompDataOffsetNR)
   195  	return root.traverse(buildBSTProgram, rules, program)
   196  }
   197  
    198  // createBST converts a sorted slice of syscall numbers into a balanced BST.
    199  // It panics if syscalls is empty.
   200  func createBST(syscalls []uintptr) *node {
   201  	i := len(syscalls) / 2
   202  	parent := node{value: syscalls[i]}
   203  	if i > 0 {
   204  		parent.left = createBST(syscalls[:i])
   205  	}
   206  	if i+1 < len(syscalls) {
   207  		parent.right = createBST(syscalls[i+1:])
   208  	}
   209  	return &parent
   210  }
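
         // For instance, createBST([]uintptr{9, 22, 35, 50, 52}) picks the middle
         // element (35) as the root and recurses on each half:
         //
         //	        35
         //	       /  \
         //	     22    52
         //	    /     /
         //	   9    50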
   211  
   212  func vsyscallViolationLabel(ruleSetIdx int, sysno uintptr) string {
   213  	return fmt.Sprintf("vsyscallViolation_%v_%v", ruleSetIdx, sysno)
   214  }
   215  
   216  func ruleViolationLabel(ruleSetIdx int, sysno uintptr, idx int) string {
   217  	return fmt.Sprintf("ruleViolation_%v_%v_%v", ruleSetIdx, sysno, idx)
   218  }
   219  
   220  func ruleLabel(ruleSetIdx int, sysno uintptr, idx int, name string) string {
   221  	return fmt.Sprintf("rule_%v_%v_%v_%v", ruleSetIdx, sysno, idx, name)
   222  }
   223  
   224  func checkArgsLabel(sysno uintptr) string {
   225  	return fmt.Sprintf("checkArgs_%v", sysno)
   226  }
   227  
   228  // addSyscallArgsCheck adds argument checks for a single system call. It does
    229  // not insert a jump to the default action at the end; it is the
   230  // responsibility of the caller to insert an appropriate jump after calling
   231  // this function.
   232  func addSyscallArgsCheck(p *bpf.ProgramBuilder, rules []Rule, action linux.BPFAction, ruleSetIdx int, sysno uintptr) error {
   233  	for ruleidx, rule := range rules {
   234  		labelled := false
   235  		for i, arg := range rule {
   236  			if arg != nil {
    237  				// Skip this argument if it uses MatchAny, since no
    238  				// instructions are required for it.
   239  				if _, ok := arg.(MatchAny); ok {
   240  					continue
   241  				}
   242  
   243  				// Determine the data offset for low and high bits of input.
   244  				dataOffsetLow := seccompDataOffsetArgLow(i)
   245  				dataOffsetHigh := seccompDataOffsetArgHigh(i)
   246  				if i == RuleIP {
   247  					dataOffsetLow = seccompDataOffsetIPLow
   248  					dataOffsetHigh = seccompDataOffsetIPHigh
   249  				}
   250  
   251  				// Add the conditional operation. Input values to the BPF
   252  				// program are 64bit values.  However, comparisons in BPF can
   253  				// only be done on 32bit values. This means that we need to do
   254  				// multiple BPF comparisons in order to do one logical 64bit
   255  				// comparison.
   256  				switch a := arg.(type) {
   257  				case EqualTo:
   258  					// EqualTo checks that both the higher and lower 32bits are equal.
   259  					high, low := uint32(a>>32), uint32(a)
   260  
   261  					// Assert that the lower 32bits are equal.
   262  					// arg_low == low ? continue : violation
   263  					p.AddStmt(bpf.Ld|bpf.Abs|bpf.W, dataOffsetLow)
   264  					p.AddJumpFalseLabel(bpf.Jmp|bpf.Jeq|bpf.K, low, 0, ruleViolationLabel(ruleSetIdx, sysno, ruleidx))
   265  
    266  					// Assert that the higher 32bits are also equal.
   267  					// arg_high == high ? continue/success : violation
   268  					p.AddStmt(bpf.Ld|bpf.Abs|bpf.W, dataOffsetHigh)
   269  					p.AddJumpFalseLabel(bpf.Jmp|bpf.Jeq|bpf.K, high, 0, ruleViolationLabel(ruleSetIdx, sysno, ruleidx))
   270  					labelled = true
   271  				case NotEqual:
   272  					// NotEqual checks that either the higher or lower 32bits
   273  					// are *not* equal.
   274  					high, low := uint32(a>>32), uint32(a)
   275  					labelGood := fmt.Sprintf("ne%v", i)
   276  
    277  					// Check whether the lower 32bits are not equal.
    278  					// arg_low == low ? continue : success
   279  					p.AddStmt(bpf.Ld|bpf.Abs|bpf.W, dataOffsetLow)
   280  					p.AddJumpFalseLabel(bpf.Jmp|bpf.Jeq|bpf.K, low, 0, ruleLabel(ruleSetIdx, sysno, ruleidx, labelGood))
   281  
    282  					// Assert that the higher 32bits are not equal (given
    283  					// that the lower bits are equal).
   284  					// arg_high == high ? violation : continue/success
   285  					p.AddStmt(bpf.Ld|bpf.Abs|bpf.W, dataOffsetHigh)
   286  					p.AddJumpTrueLabel(bpf.Jmp|bpf.Jeq|bpf.K, high, ruleViolationLabel(ruleSetIdx, sysno, ruleidx), 0)
   287  					p.AddLabel(ruleLabel(ruleSetIdx, sysno, ruleidx, labelGood))
   288  					labelled = true
   289  				case GreaterThan:
   290  					// GreaterThan checks that the higher 32bits is greater
   291  					// *or* that the higher 32bits are equal and the lower
   292  					// 32bits are greater.
   293  					high, low := uint32(a>>32), uint32(a)
   294  					labelGood := fmt.Sprintf("gt%v", i)
   295  
   296  					// Assert the higher 32bits are greater than or equal.
   297  					// arg_high >= high ? continue : violation (arg_high < high)
   298  					p.AddStmt(bpf.Ld|bpf.Abs|bpf.W, dataOffsetHigh)
   299  					p.AddJumpFalseLabel(bpf.Jmp|bpf.Jge|bpf.K, high, 0, ruleViolationLabel(ruleSetIdx, sysno, ruleidx))
   300  
    301  					// Assert that the lower 32bits are greater (if the higher bits are equal).
   302  					// arg_high == high ? continue : success (arg_high > high)
   303  					p.AddJumpFalseLabel(bpf.Jmp|bpf.Jeq|bpf.K, high, 0, ruleLabel(ruleSetIdx, sysno, ruleidx, labelGood))
   304  					// arg_low > low ? continue/success : violation (arg_high == high and arg_low <= low)
   305  					p.AddStmt(bpf.Ld|bpf.Abs|bpf.W, dataOffsetLow)
   306  					p.AddJumpFalseLabel(bpf.Jmp|bpf.Jgt|bpf.K, low, 0, ruleViolationLabel(ruleSetIdx, sysno, ruleidx))
   307  					p.AddLabel(ruleLabel(ruleSetIdx, sysno, ruleidx, labelGood))
   308  					labelled = true
   309  				case GreaterThanOrEqual:
   310  					// GreaterThanOrEqual checks that the higher 32bits is
   311  					// greater *or* that the higher 32bits are equal and the
   312  					// lower 32bits are greater than or equal.
   313  					high, low := uint32(a>>32), uint32(a)
   314  					labelGood := fmt.Sprintf("ge%v", i)
   315  
   316  					// Assert the higher 32bits are greater than or equal.
   317  					// arg_high >= high ? continue : violation (arg_high < high)
   318  					p.AddStmt(bpf.Ld|bpf.Abs|bpf.W, dataOffsetHigh)
   319  					p.AddJumpFalseLabel(bpf.Jmp|bpf.Jge|bpf.K, high, 0, ruleViolationLabel(ruleSetIdx, sysno, ruleidx))
   320  					// arg_high == high ? continue : success (arg_high > high)
   321  					p.AddJumpFalseLabel(bpf.Jmp|bpf.Jeq|bpf.K, high, 0, ruleLabel(ruleSetIdx, sysno, ruleidx, labelGood))
   322  
   323  					// Assert that the lower 32bits are greater (assuming the
   324  					// higher bits are equal).
   325  					// arg_low >= low ? continue/success : violation (arg_high == high and arg_low < low)
   326  					p.AddStmt(bpf.Ld|bpf.Abs|bpf.W, dataOffsetLow)
   327  					p.AddJumpFalseLabel(bpf.Jmp|bpf.Jge|bpf.K, low, 0, ruleViolationLabel(ruleSetIdx, sysno, ruleidx))
   328  					p.AddLabel(ruleLabel(ruleSetIdx, sysno, ruleidx, labelGood))
   329  					labelled = true
   330  				case LessThan:
   331  					// LessThan checks that the higher 32bits is less *or* that
   332  					// the higher 32bits are equal and the lower 32bits are
   333  					// less.
   334  					high, low := uint32(a>>32), uint32(a)
   335  					labelGood := fmt.Sprintf("lt%v", i)
   336  
   337  					// Assert the higher 32bits are less than or equal.
   338  					// arg_high > high ? violation : continue
   339  					p.AddStmt(bpf.Ld|bpf.Abs|bpf.W, dataOffsetHigh)
   340  					p.AddJumpTrueLabel(bpf.Jmp|bpf.Jgt|bpf.K, high, ruleViolationLabel(ruleSetIdx, sysno, ruleidx), 0)
   341  					// arg_high == high ? continue : success (arg_high < high)
   342  					p.AddJumpFalseLabel(bpf.Jmp|bpf.Jeq|bpf.K, high, 0, ruleLabel(ruleSetIdx, sysno, ruleidx, labelGood))
   343  
   344  					// Assert that the lower 32bits are less (assuming the
   345  					// higher bits are equal).
   346  					// arg_low >= low ? violation : continue
   347  					p.AddStmt(bpf.Ld|bpf.Abs|bpf.W, dataOffsetLow)
   348  					p.AddJumpTrueLabel(bpf.Jmp|bpf.Jge|bpf.K, low, ruleViolationLabel(ruleSetIdx, sysno, ruleidx), 0)
   349  					p.AddLabel(ruleLabel(ruleSetIdx, sysno, ruleidx, labelGood))
   350  					labelled = true
   351  				case LessThanOrEqual:
    352  					// LessThanOrEqual checks that the higher 32bits is less *or* that
   353  					// the higher 32bits are equal and the lower 32bits are
   354  					// less than or equal.
   355  					high, low := uint32(a>>32), uint32(a)
   356  					labelGood := fmt.Sprintf("le%v", i)
   357  
   358  					// Assert the higher 32bits are less than or equal.
    359  					// arg_high > high ? violation : continue
   360  					p.AddStmt(bpf.Ld|bpf.Abs|bpf.W, dataOffsetHigh)
   361  					p.AddJumpTrueLabel(bpf.Jmp|bpf.Jgt|bpf.K, high, ruleViolationLabel(ruleSetIdx, sysno, ruleidx), 0)
   362  					// arg_high == high ? continue : success
   363  					p.AddJumpFalseLabel(bpf.Jmp|bpf.Jeq|bpf.K, high, 0, ruleLabel(ruleSetIdx, sysno, ruleidx, labelGood))
   364  
   365  					// Assert the lower bits are less than or equal (assuming
   366  					// the higher bits are equal).
   367  					// arg_low > low ? violation : success
   368  					p.AddStmt(bpf.Ld|bpf.Abs|bpf.W, dataOffsetLow)
   369  					p.AddJumpTrueLabel(bpf.Jmp|bpf.Jgt|bpf.K, low, ruleViolationLabel(ruleSetIdx, sysno, ruleidx), 0)
   370  					p.AddLabel(ruleLabel(ruleSetIdx, sysno, ruleidx, labelGood))
   371  					labelled = true
   372  				case maskedEqual:
   373  					// MaskedEqual checks that the bitwise AND of the value and
   374  					// mask are equal for both the higher and lower 32bits.
   375  					high, low := uint32(a.value>>32), uint32(a.value)
   376  					maskHigh, maskLow := uint32(a.mask>>32), uint32(a.mask)
   377  
   378  					// Assert that the lower 32bits are equal when masked.
   379  					// A <- arg_low.
   380  					p.AddStmt(bpf.Ld|bpf.Abs|bpf.W, dataOffsetLow)
   381  					// A <- arg_low & maskLow
   382  					p.AddStmt(bpf.Alu|bpf.And|bpf.K, maskLow)
   383  					// Assert that arg_low & maskLow == low.
   384  					p.AddJumpFalseLabel(bpf.Jmp|bpf.Jeq|bpf.K, low, 0, ruleViolationLabel(ruleSetIdx, sysno, ruleidx))
   385  
   386  					// Assert that the higher 32bits are equal when masked.
   387  					// A <- arg_high
   388  					p.AddStmt(bpf.Ld|bpf.Abs|bpf.W, dataOffsetHigh)
   389  					// A <- arg_high & maskHigh
   390  					p.AddStmt(bpf.Alu|bpf.And|bpf.K, maskHigh)
   391  					// Assert that arg_high & maskHigh == high.
   392  					p.AddJumpFalseLabel(bpf.Jmp|bpf.Jeq|bpf.K, high, 0, ruleViolationLabel(ruleSetIdx, sysno, ruleidx))
   393  					labelled = true
   394  				default:
   395  					return fmt.Errorf("unknown syscall rule type: %v", reflect.TypeOf(a))
   396  				}
   397  			}
   398  		}
   399  
   400  		// Matched, emit the given action.
   401  		p.AddStmt(bpf.Ret|bpf.K, uint32(action))
   402  
    403  		// Label the end of the rule if necessary. The jumps above land
    404  		// here when an argument check fails.
   405  		if labelled {
   406  			if err := p.AddLabel(ruleViolationLabel(ruleSetIdx, sysno, ruleidx)); err != nil {
   407  				return err
   408  			}
   409  		}
   410  	}
   411  
   412  	return nil
   413  }
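
         // As a worked example of the 32bit split above: EqualTo(0x100000002)
         // yields high = 0x1 and low = 0x2, so the generated code loads the
         // argument's low word and compares it against 0x2, then loads the high
         // word and compares it against 0x1; failing either comparison jumps to
         // the rule's violation label.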
   414  
    415  // buildBSTProgram emits the BPF code for a single BST node; applied to every
    416  // node via traverse, the outline of the generated code is as follows:
   417  //
   418  // // SYS_PIPE(22), root
   419  //
   420  //	(A == 22) ? goto argument check : continue
   421  //	(A > 22) ? goto index_35 : goto index_9
   422  //
   423  // index_9:  // SYS_MMAP(9), leaf
   424  //
    425  //	(A == 9) ? goto argument check : goto defaultLabel
   426  //
   427  // index_35:  // SYS_NANOSLEEP(35), single child
   428  //
   429  //	(A == 35) ? goto argument check : continue
   430  //	(A > 35) ? goto index_50 : goto defaultLabel
   431  //
   432  // index_50:  // SYS_LISTEN(50), leaf
   433  //
   434  //	(A == 50) ? goto argument check : goto defaultLabel
   435  func buildBSTProgram(n *node, rules []RuleSet, program *bpf.ProgramBuilder) error {
   436  	// Root node is never referenced by label, skip it.
   437  	if !n.root {
   438  		if err := program.AddLabel(n.label()); err != nil {
   439  			return err
   440  		}
   441  	}
   442  
   443  	sysno := n.value
   444  	program.AddJumpTrueLabel(bpf.Jmp|bpf.Jeq|bpf.K, uint32(sysno), checkArgsLabel(sysno), 0)
   445  	if n.left == nil && n.right == nil {
   446  		// Leaf nodes don't require extra check.
   447  		program.AddDirectJumpLabel(defaultLabel)
   448  	} else {
    449  		// Non-leaf node. Check which way to go next. Use direct jumps in case
    450  		// the offset exceeds the limit of a conditional jump (255).
   451  		program.AddJump(bpf.Jmp|bpf.Jgt|bpf.K, uint32(sysno), 0, skipOneInst)
   452  		program.AddDirectJumpLabel(n.right.label())
   453  		program.AddDirectJumpLabel(n.left.label())
   454  	}
   455  
   456  	if err := program.AddLabel(checkArgsLabel(sysno)); err != nil {
   457  		return err
   458  	}
   459  
   460  	emitted := false
   461  	for ruleSetIdx, rs := range rules {
   462  		if _, ok := rs.Rules[sysno]; ok {
    463  			// A blanket action (no argument rules) always
    464  			// matches, making any later rule set for this syscall
    465  			// unreachable. We can't catch all overlaps, but we can
    466  			// at least catch this one and emit a sensible error.
   467  			if emitted {
   468  				return fmt.Errorf("unreachable action for %v: 0x%x (rule set %d)", SyscallName(sysno), rs.Action, ruleSetIdx)
   469  			}
   470  
    471  			// Emit a vsyscall check if this rule set requires a
    472  			// Vsyscall match. The check requires the top bit of
    473  			// the instruction pointer to be set, which is where
    474  			// the vsyscall page is mapped.
   475  			if rs.Vsyscall {
   476  				program.AddStmt(bpf.Ld|bpf.Abs|bpf.W, seccompDataOffsetIPHigh)
   477  				program.AddJumpFalseLabel(bpf.Jmp|bpf.Jset|bpf.K, 0x80000000, 0, vsyscallViolationLabel(ruleSetIdx, sysno))
   478  			}
   479  
   480  			// Emit matchers.
   481  			if len(rs.Rules[sysno]) == 0 {
   482  				// This is a blanket action.
   483  				program.AddStmt(bpf.Ret|bpf.K, uint32(rs.Action))
   484  				emitted = true
   485  			} else {
   486  				// Add an argument check for these particular
   487  				// arguments. This will continue execution and
   488  				// check the next rule set. We need to ensure
   489  				// that at the very end, we insert a direct
   490  				// jump label for the unmatched case.
   491  				if err := addSyscallArgsCheck(program, rs.Rules[sysno], rs.Action, ruleSetIdx, sysno); err != nil {
   492  					return err
   493  				}
   494  			}
   495  
   496  			// If there was a Vsyscall check for this rule, then we
   497  			// need to add an appropriate label for the jump above.
   498  			if rs.Vsyscall {
   499  				if err := program.AddLabel(vsyscallViolationLabel(ruleSetIdx, sysno)); err != nil {
   500  					return err
   501  				}
   502  			}
   503  		}
   504  	}
   505  
    506  	// Not matched? We only need to insert a jump to the default label if
    507  	// no unconditional action has been emitted for this syscall.
   508  	if !emitted {
   509  		program.AddDirectJumpLabel(defaultLabel)
   510  	}
   511  
   512  	return nil
   513  }
   514  
   515  // node represents a tree node.
   516  type node struct {
   517  	value uintptr
   518  	left  *node
   519  	right *node
   520  	root  bool
   521  }
   522  
   523  // label returns the label corresponding to this node.
   524  //
   525  // If n is nil, then the defaultLabel is returned.
   526  func (n *node) label() string {
   527  	if n == nil {
   528  		return defaultLabel
   529  	}
   530  	return fmt.Sprintf("index_%v", n.value)
   531  }
   532  
   533  type traverseFunc func(*node, []RuleSet, *bpf.ProgramBuilder) error
   534  
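         // traverse visits the tree rooted at n in pre-order: the node itself,
         // then its left subtree, then its right subtree. This is the order in
         // which the corresponding BPF blocks are emitted by buildBSTProgram.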
   535  func (n *node) traverse(fn traverseFunc, rules []RuleSet, p *bpf.ProgramBuilder) error {
   536  	if n == nil {
   537  		return nil
   538  	}
   539  	if err := fn(n, rules, p); err != nil {
   540  		return err
   541  	}
   542  	if err := n.left.traverse(fn, rules, p); err != nil {
   543  		return err
   544  	}
   545  	return n.right.traverse(fn, rules, p)
   546  }