code-intelligence.com/cifuzz@v0.40.0/third-party/minijail/bpf.c

/* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "bpf.h"
#include "util.h"

/* Architecture validation. */
size_t bpf_validate_arch(struct sock_filter *filter)
{
	struct sock_filter *curr_block = filter;
	set_bpf_stmt(curr_block++, BPF_LD + BPF_W + BPF_ABS, arch_nr);
	set_bpf_jump(curr_block++, BPF_JMP + BPF_JEQ + BPF_K, MINIJAIL_ARCH_NR,
		     SKIP, NEXT);
	set_bpf_ret_kill(curr_block++);
	return curr_block - filter;
}
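
/*
 * Schematic of the three-instruction fragment emitted above (SKIP/NEXT are
 * the relative jump offsets from bpf.h, and set_bpf_ret_kill is assumed to
 * emit a SECCOMP_RET_KILL return, per its name):
 *
 *   ld  [arch_nr]                      ; A = seccomp_data.arch
 *   jeq #MINIJAIL_ARCH_NR, SKIP, NEXT  ; matching arch skips the kill
 *   ret #SECCOMP_RET_KILL              ; any other arch is killed
 */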

/* Syscall number eval functions. */
size_t bpf_allow_syscall(struct sock_filter *filter, int nr)
{
	struct sock_filter *curr_block = filter;
	set_bpf_jump(curr_block++, BPF_JMP + BPF_JEQ + BPF_K, nr, NEXT, SKIP);
	set_bpf_stmt(curr_block++, BPF_RET + BPF_K, SECCOMP_RET_ALLOW);
	return curr_block - filter;
}

size_t bpf_allow_syscall_args(struct sock_filter *filter, int nr,
			      unsigned int id)
{
	struct sock_filter *curr_block = filter;
	set_bpf_jump(curr_block++, BPF_JMP + BPF_JEQ + BPF_K, nr, NEXT, SKIP);
	set_bpf_jump_lbl(curr_block++, id);
	return curr_block - filter;
}
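
/*
 * Note that, unlike bpf_allow_syscall above, a match here does not return:
 * it jumps to label |id|, which bpf_resolve_jumps() later patches to point
 * at the argument-checking block emitted for this syscall.
 */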

/* Size-aware arg loaders. */
#if defined(BITS32)
size_t bpf_load_arg(struct sock_filter *filter, int argidx)
{
	set_bpf_stmt(filter, BPF_LD + BPF_W + BPF_ABS, LO_ARG(argidx));
	return 1U;
}
#elif defined(BITS64)
size_t bpf_load_arg(struct sock_filter *filter, int argidx)
{
	struct sock_filter *curr_block = filter;
	set_bpf_stmt(curr_block++, BPF_LD + BPF_W + BPF_ABS, LO_ARG(argidx));
	set_bpf_stmt(curr_block++, BPF_ST, 0); /* lo -> M[0] */
	set_bpf_stmt(curr_block++, BPF_LD + BPF_W + BPF_ABS, HI_ARG(argidx));
	set_bpf_stmt(curr_block++, BPF_ST, 1); /* hi -> M[1] */
	return curr_block - filter;
}
#endif
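
/*
 * On 64-bit builds the loader above leaves the argument's low word in
 * scratch memory M[0], its high word in M[1], and (because the last load
 * targeted HI_ARG) the high word also in the accumulator A. The 64-bit
 * comparison helpers below depend on exactly this layout.
 */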

/* Size-aware comparisons. */
size_t bpf_comp_jeq32(struct sock_filter *filter, unsigned long c,
		      unsigned char jt, unsigned char jf)
{
	unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
	set_bpf_jump(filter, BPF_JMP + BPF_JEQ + BPF_K, lo, jt, jf);
	return 1U;
}

/*
 * On 64 bits, we have to do two 32-bit comparisons.
 * We jump true when *both* comparisons are true.
 */
#if defined(BITS64)
size_t bpf_comp_jeq64(struct sock_filter *filter, uint64_t c, unsigned char jt,
		      unsigned char jf)
{
	unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
	unsigned int hi = (unsigned int)(c >> 32);

	struct sock_filter *curr_block = filter;

	/* bpf_load_arg leaves |hi| in A. */
	curr_block += bpf_comp_jeq32(curr_block, hi, NEXT, SKIPN(2) + jf);
	set_bpf_stmt(curr_block++, BPF_LD + BPF_MEM, 0); /* swap in |lo| */
	curr_block += bpf_comp_jeq32(curr_block, lo, jt, jf);

	return curr_block - filter;
}
#endif
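
/*
 * Worked example for bpf_comp_jeq64 with c = 0x0000000100000002
 * (hi = 1, lo = 2), shown schematically:
 *
 *   jeq #1, NEXT, SKIPN(2)+jf  ; high words differ -> jump false
 *   ld  M[0]                   ; A = low word of the argument
 *   jeq #2, jt, jf             ; both halves matched -> jump true
 */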

size_t bpf_comp_jgt32(struct sock_filter *filter, unsigned long c,
		      unsigned char jt, unsigned char jf)
{
	unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
	set_bpf_jump(filter, BPF_JMP + BPF_JGT + BPF_K, lo, jt, jf);
	return 1U;
}

size_t bpf_comp_jge32(struct sock_filter *filter, unsigned long c,
		      unsigned char jt, unsigned char jf)
{
	unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
	set_bpf_jump(filter, BPF_JMP + BPF_JGE + BPF_K, lo, jt, jf);
	return 1U;
}

/*
 * On 64 bits, we have to do two or three 32-bit comparisons.
 * We jump true when the |hi| comparison is true *or* |hi| is equal and the
 * |lo| comparison is true.
 */
#if defined(BITS64)
size_t bpf_comp_jgt64(struct sock_filter *filter, uint64_t c, unsigned char jt,
		      unsigned char jf)
{
	unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
	unsigned int hi = (unsigned int)(c >> 32);

	struct sock_filter *curr_block = filter;

	/* bpf_load_arg leaves |hi| in A. */
	if (hi == 0) {
		curr_block +=
		    bpf_comp_jgt32(curr_block, hi, SKIPN(2) + jt, NEXT);
	} else {
		curr_block +=
		    bpf_comp_jgt32(curr_block, hi, SKIPN(3) + jt, NEXT);
		curr_block +=
		    bpf_comp_jeq32(curr_block, hi, NEXT, SKIPN(2) + jf);
	}
	set_bpf_stmt(curr_block++, BPF_LD + BPF_MEM, 0); /* swap in |lo| */
	curr_block += bpf_comp_jgt32(curr_block, lo, jt, jf);

	return curr_block - filter;
}

size_t bpf_comp_jge64(struct sock_filter *filter, uint64_t c, unsigned char jt,
		      unsigned char jf)
{
	unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
	unsigned int hi = (unsigned int)(c >> 32);

	struct sock_filter *curr_block = filter;

	/* bpf_load_arg leaves |hi| in A. */
	if (hi == 0) {
		curr_block +=
		    bpf_comp_jgt32(curr_block, hi, SKIPN(2) + jt, NEXT);
	} else {
		curr_block +=
		    bpf_comp_jgt32(curr_block, hi, SKIPN(3) + jt, NEXT);
		curr_block +=
		    bpf_comp_jeq32(curr_block, hi, NEXT, SKIPN(2) + jf);
	}
	set_bpf_stmt(curr_block++, BPF_LD + BPF_MEM, 0); /* swap in |lo| */
	curr_block += bpf_comp_jge32(curr_block, lo, jt, jf);

	return curr_block - filter;
}
#endif
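
/*
 * Decision tree for the full (hi != 0) case of bpf_comp_jgt64 above,
 * schematically:
 *
 *   jgt #hi, SKIPN(3)+jt, NEXT  ; A > hi  -> whole value is greater
 *   jeq #hi, NEXT, SKIPN(2)+jf  ; A < hi  -> whole value is smaller
 *   ld  M[0]                    ; A == hi -> the low words decide
 *   jgt #lo, jt, jf
 *
 * When hi == 0, any non-zero high word already proves the value is
 * greater, so the jeq is dropped and one instruction is saved.
 * bpf_comp_jge64 is identical except for the final jge.
 */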

/* Size-aware bitwise AND. */
size_t bpf_comp_jset32(struct sock_filter *filter, unsigned long mask,
		       unsigned char jt, unsigned char jf)
{
	unsigned int mask_lo = (unsigned int)(mask & 0xFFFFFFFF);
	set_bpf_jump(filter, BPF_JMP + BPF_JSET + BPF_K, mask_lo, jt, jf);
	return 1U;
}

/*
 * On 64 bits, we have to do two 32-bit bitwise ANDs.
 * We jump true when *either* bitwise AND is true (non-zero).
 */
#if defined(BITS64)
size_t bpf_comp_jset64(struct sock_filter *filter, uint64_t mask,
		       unsigned char jt, unsigned char jf)
{
	unsigned int mask_lo = (unsigned int)(mask & 0xFFFFFFFF);
	unsigned int mask_hi = (unsigned int)(mask >> 32);

	struct sock_filter *curr_block = filter;

	/* bpf_load_arg leaves |hi| in A. */
	curr_block += bpf_comp_jset32(curr_block, mask_hi, SKIPN(2) + jt, NEXT);
	set_bpf_stmt(curr_block++, BPF_LD + BPF_MEM, 0); /* swap in |lo| */
	curr_block += bpf_comp_jset32(curr_block, mask_lo, jt, jf);

	return curr_block - filter;
}
#endif
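
/*
 * Example: with mask = 0x8000000000000001, the first jset tests bit 63
 * against the high word and, failing that, the second tests bit 0 against
 * the low word; a hit on either side jumps true, giving "any masked bit
 * set" semantics across the full 64 bits.
 */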

size_t bpf_comp_jin(struct sock_filter *filter, unsigned long mask,
		    unsigned char jt, unsigned char jf)
{
	unsigned long negative_mask = ~mask;
	/*
	 * The mask is negated, so the comparison will be true when the
	 * argument includes a flag that wasn't listed in the original
	 * (non-negated) mask. This would be the failure case, so we switch
	 * |jt| and |jf|.
	 */
	return bpf_comp_jset(filter, negative_mask, jf, jt);
}
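
/*
 * Illustrative use (flag choice arbitrary): for mask = O_WRONLY|O_NONBLOCK,
 * |negative_mask| has a bit set for every flag *not* in the mask, so a
 * jset hit means the argument carries a disallowed flag; swapping |jt|
 * and |jf| turns that hit into the false branch.
 */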

static size_t bpf_arg_comp_len(int op, unsigned long c attribute_unused)
{
	/* The comparisons that use gt/ge internally may have extra opcodes. */
	switch (op) {
	case LT:
	case LE:
	case GT:
	case GE:
#if defined(BITS64)
		/*
		 * |c| can only have a high 32-bit part when running on 64 bits.
		 */
		if ((c >> 32) == 0)
			return BPF_ARG_SHORT_GT_GE_COMP_LEN + 1;
#endif
		return BPF_ARG_GT_GE_COMP_LEN + 1;
	default:
		return BPF_ARG_COMP_LEN + 1;
	}
}

size_t bpf_arg_comp(struct sock_filter **pfilter, int op, int argidx,
		    unsigned long c, unsigned int label_id)
{
	size_t filter_len = bpf_arg_comp_len(op, c);
	struct sock_filter *filter =
	    calloc(filter_len, sizeof(struct sock_filter));
	struct sock_filter *curr_block = filter;
	size_t (*comp_function)(struct sock_filter *filter, unsigned long k,
				unsigned char jt, unsigned char jf);
	int flip = 0;

	if (!filter) {
		*pfilter = NULL;
		return 0;
	}

	/* Load arg. */
	curr_block += bpf_load_arg(curr_block, argidx);

	/* Jump type. */
	switch (op) {
	case EQ:
		comp_function = bpf_comp_jeq;
		flip = 0;
		break;
	case NE:
		comp_function = bpf_comp_jeq;
		flip = 1;
		break;
	case LT:
		comp_function = bpf_comp_jge;
		flip = 1;
		break;
	case LE:
		comp_function = bpf_comp_jgt;
		flip = 1;
		break;
	case GT:
		comp_function = bpf_comp_jgt;
		flip = 0;
		break;
	case GE:
		comp_function = bpf_comp_jge;
		flip = 0;
		break;
	case SET:
		comp_function = bpf_comp_jset;
		flip = 0;
		break;
	case IN:
		comp_function = bpf_comp_jin;
		flip = 0;
		break;
	default:
		free(filter); /* Unknown op: don't leak the block. */
		*pfilter = NULL;
		return 0;
	}

	/*
	 * It's easier for the rest of the code to have the true branch
	 * skip and the false branch fall through.
	 */
	unsigned char jt = flip ? NEXT : SKIP;
	unsigned char jf = flip ? SKIP : NEXT;
	curr_block += comp_function(curr_block, c, jt, jf);
	curr_block += set_bpf_jump_lbl(curr_block, label_id);

	*pfilter = filter;
	return curr_block - filter;
}
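
/*
 * Illustrative call (|lbl_id| hypothetical): build a block that falls
 * through when syscall argument 1 equals 0 and jumps to |lbl_id| when it
 * does not (the label jump is the failure path here, since the true branch
 * skips it):
 *
 *   struct sock_filter *block;
 *   size_t len = bpf_arg_comp(&block, EQ, 1, 0, lbl_id);
 *   if (len == 0)
 *           return -1; /* allocation failure or unknown |op| */
 *
 * |block| is calloc'd above, so the caller is presumably expected to copy
 * it into the final program and free it.
 */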

int bpf_resolve_jumps(struct bpf_labels *labels, struct sock_filter *filter,
		      size_t len)
{
	struct sock_filter *instr;
	size_t i, offset;

	if (len > BPF_MAXINSNS)
		return -1;

	/*
	 * Walk it once, backwards, to build the label table and do fixups.
	 * Since backward jumps are disallowed by BPF, this is easy.
	 */
	for (i = 0; i < len; i++) {
		offset = len - i - 1;
		instr = &filter[offset];
		if (instr->code != (BPF_JMP + BPF_JA))
			continue;
		switch ((instr->jt << 8) | instr->jf) {
		case (JUMP_JT << 8) | JUMP_JF:
			if (instr->k >= labels->count) {
				warn("nonexistent label id: %u", instr->k);
				return -1;
			}
			if (labels->labels[instr->k].location == 0xffffffff) {
				warn("unresolved label: '%s'",
				     labels->labels[instr->k].label);
				return -1;
			}
			instr->k =
			    labels->labels[instr->k].location - (offset + 1);
			instr->jt = 0;
			instr->jf = 0;
			continue;
		case (LABEL_JT << 8) | LABEL_JF:
			if (labels->labels[instr->k].location != 0xffffffff) {
				warn("duplicate label: '%s'",
				     labels->labels[instr->k].label);
				return -1;
			}
			labels->labels[instr->k].location = offset;
			instr->k = 0; /* Fall through. */
			instr->jt = 0;
			instr->jf = 0;
			continue;
		}
	}
	return 0;
}
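
/*
 * Labels are encoded in-band: a LABEL_JT/LABEL_JF marker becomes a no-op
 * jump (k = 0) that records its own offset, and each JUMP_JT/JUMP_JF site
 * is rewritten to the relative distance location - (offset + 1). Walking
 * backwards works because classic BPF only allows forward jumps, so every
 * label is visited before the sites that target it.
 */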

/* Simple lookup table for labels. */
int bpf_label_id(struct bpf_labels *labels, const char *label)
{
	struct __bpf_label *begin = labels->labels, *end;
	int id;
	if (labels->count == 0) {
		begin->label = strndup(label, MAX_BPF_LABEL_LEN);
		if (!begin->label) {
			return -1;
		}
		begin->location = 0xffffffff;
		labels->count++;
		return 0;
	}
	end = begin + labels->count;
	for (id = 0; begin < end; ++begin, ++id) {
		if (!strcmp(label, begin->label)) {
			return id;
		}
	}

	/* The label wasn't found. Insert it only if there's space. */
	if (labels->count == BPF_LABELS_MAX) {
		return -1;
	}
	begin->label = strndup(label, MAX_BPF_LABEL_LEN);
	if (!begin->label) {
		return -1;
	}
	begin->location = 0xffffffff;
	labels->count++;
	return id;
}
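
/*
 * Illustrative lookup (label string hypothetical):
 *
 *   int id = bpf_label_id(&labels, "arg_check_open");
 *   if (id < 0)
 *           return -1; /* table full (BPF_LABELS_MAX) or strndup failure */
 *
 * The same string always yields the same id, so jump sites and label
 * definitions can be emitted independently and matched up by
 * bpf_resolve_jumps() afterwards.
 */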

void free_label_strings(struct bpf_labels *labels)
{
	if (labels->count == 0)
		return;

	struct __bpf_label *begin = labels->labels, *end;

	end = begin + labels->count;
	for (; begin < end; ++begin) {
		if (begin->label)
			free((void *)(begin->label));
	}

	labels->count = 0;
}