/* minijail/bpf.c */
      1 /* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
      2  * Use of this source code is governed by a BSD-style license that can be
      3  * found in the LICENSE file.
      4  */
      5 
      6 #include <stdint.h>
      7 #include <stdio.h>
      8 #include <stdlib.h>
      9 #include <string.h>
     10 
     11 #include "bpf.h"
     12 #include "util.h"
     13 
     14 /* Architecture validation. */
     15 size_t bpf_validate_arch(struct sock_filter *filter)
     16 {
     17 	struct sock_filter *curr_block = filter;
     18 	set_bpf_stmt(curr_block++, BPF_LD + BPF_W + BPF_ABS, arch_nr);
     19 	set_bpf_jump(curr_block++, BPF_JMP + BPF_JEQ + BPF_K, ARCH_NR, SKIP,
     20 		     NEXT);
     21 	set_bpf_ret_kill(curr_block++);
     22 	return curr_block - filter;
     23 }
     24 
     25 /* Syscall number eval functions. */
     26 size_t bpf_allow_syscall(struct sock_filter *filter, int nr)
     27 {
     28 	struct sock_filter *curr_block = filter;
     29 	set_bpf_jump(curr_block++, BPF_JMP + BPF_JEQ + BPF_K, nr, NEXT, SKIP);
     30 	set_bpf_stmt(curr_block++, BPF_RET + BPF_K, SECCOMP_RET_ALLOW);
     31 	return curr_block - filter;
     32 }
     33 
     34 size_t bpf_allow_syscall_args(struct sock_filter *filter, int nr,
     35 			      unsigned int id)
     36 {
     37 	struct sock_filter *curr_block = filter;
     38 	set_bpf_jump(curr_block++, BPF_JMP + BPF_JEQ + BPF_K, nr, NEXT, SKIP);
     39 	set_bpf_jump_lbl(curr_block++, id);
     40 	return curr_block - filter;
     41 }
     42 
     43 /* Size-aware arg loaders. */
     44 #if defined(BITS32)
     45 size_t bpf_load_arg(struct sock_filter *filter, int argidx)
     46 {
     47 	set_bpf_stmt(filter, BPF_LD + BPF_W + BPF_ABS, LO_ARG(argidx));
     48 	return 1U;
     49 }
     50 #elif defined(BITS64)
     51 size_t bpf_load_arg(struct sock_filter *filter, int argidx)
     52 {
     53 	struct sock_filter *curr_block = filter;
     54 	set_bpf_stmt(curr_block++, BPF_LD + BPF_W + BPF_ABS, LO_ARG(argidx));
     55 	set_bpf_stmt(curr_block++, BPF_ST, 0); /* lo -> M[0] */
     56 	set_bpf_stmt(curr_block++, BPF_LD + BPF_W + BPF_ABS, HI_ARG(argidx));
     57 	set_bpf_stmt(curr_block++, BPF_ST, 1); /* hi -> M[1] */
     58 	return curr_block - filter;
     59 }
     60 #endif
     61 
     62 /* Size-aware comparisons. */
     63 size_t bpf_comp_jeq32(struct sock_filter *filter, unsigned long c,
     64 		      unsigned char jt, unsigned char jf)
     65 {
     66 	unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
     67 	set_bpf_jump(filter, BPF_JMP + BPF_JEQ + BPF_K, lo, jt, jf);
     68 	return 1U;
     69 }
     70 
     71 /*
     72  * On 64 bits, we have to do two 32-bit comparisons.
     73  * We jump true when *both* comparisons are true.
     74  */
     75 #if defined(BITS64)
     76 size_t bpf_comp_jeq64(struct sock_filter *filter, uint64_t c, unsigned char jt,
     77 		      unsigned char jf)
     78 {
     79 	unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
     80 	unsigned int hi = (unsigned int)(c >> 32);
     81 
     82 	struct sock_filter *curr_block = filter;
     83 
     84 	/* bpf_load_arg leaves |hi| in A */
     85 	curr_block += bpf_comp_jeq32(curr_block, hi, NEXT, SKIPN(2) + jf);
     86 	set_bpf_stmt(curr_block++, BPF_LD + BPF_MEM, 0); /* swap in |lo| */
     87 	curr_block += bpf_comp_jeq32(curr_block, lo, jt, jf);
     88 
     89 	return curr_block - filter;
     90 }
     91 #endif
     92 
     93 size_t bpf_comp_jgt32(struct sock_filter *filter, unsigned long c,
     94 		      unsigned char jt, unsigned char jf)
     95 {
     96 	unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
     97 	set_bpf_jump(filter, BPF_JMP + BPF_JGT + BPF_K, lo, jt, jf);
     98 	return 1U;
     99 }
    100 
    101 size_t bpf_comp_jge32(struct sock_filter *filter, unsigned long c,
    102 		      unsigned char jt, unsigned char jf)
    103 {
    104 	unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
    105 	set_bpf_jump(filter, BPF_JMP + BPF_JGE + BPF_K, lo, jt, jf);
    106 	return 1U;
    107 }
    108 
    109 /*
    110  * On 64 bits, we have to do two/three 32-bit comparisons.
    111  * We jump true when the |hi| comparison is true *or* |hi| is equal and the
    112  * |lo| comparison is true.
    113  */
    114 #if defined(BITS64)
/*
 * 64-bit unsigned greater-than: arg > c iff hi_arg > hi, or
 * hi_arg == hi and lo_arg > lo.  Emits up to 4 instructions and
 * returns the count.  The SKIPN() offsets below count the remaining
 * instructions of this sequence, so they must track any edit here.
 */
size_t bpf_comp_jgt64(struct sock_filter *filter, uint64_t c, unsigned char jt,
		      unsigned char jf)
{
	unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
	unsigned int hi = (unsigned int)(c >> 32);

	struct sock_filter *curr_block = filter;

	/* bpf_load_arg leaves |hi| in A. */
	if (hi == 0) {
		/*
		 * hi_arg > 0 alone proves arg > c: jump true past the
		 * 2 remaining instructions (M[0] load, |lo| compare).
		 */
		curr_block +=
		    bpf_comp_jgt32(curr_block, hi, SKIPN(2) + jt, NEXT);
	} else {
		/*
		 * hi_arg > hi: true past the 3 remaining instructions.
		 * hi_arg != hi (and not >): arg < c, so jump false past
		 * the 2 remaining instructions.  Equal high words fall
		 * through to the low-word compare.
		 */
		curr_block +=
		    bpf_comp_jgt32(curr_block, hi, SKIPN(3) + jt, NEXT);
		curr_block +=
		    bpf_comp_jeq32(curr_block, hi, NEXT, SKIPN(2) + jf);
	}
	set_bpf_stmt(curr_block++, BPF_LD + BPF_MEM, 0); /* swap in |lo| */
	curr_block += bpf_comp_jgt32(curr_block, lo, jt, jf);

	return curr_block - filter;
}
    138 
/*
 * 64-bit unsigned greater-or-equal: arg >= c iff hi_arg > hi, or
 * hi_arg == hi and lo_arg >= lo.  Note the high word deliberately uses
 * jgt32 (strictly greater already implies >=); only the final low-word
 * compare uses jge32.  SKIPN() offsets count remaining instructions.
 */
size_t bpf_comp_jge64(struct sock_filter *filter, uint64_t c, unsigned char jt,
		      unsigned char jf)
{
	unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
	unsigned int hi = (unsigned int)(c >> 32);

	struct sock_filter *curr_block = filter;

	/* bpf_load_arg leaves |hi| in A. */
	if (hi == 0) {
		/* hi_arg > 0 alone proves arg >= c; skip to |jt|. */
		curr_block +=
		    bpf_comp_jgt32(curr_block, hi, SKIPN(2) + jt, NEXT);
	} else {
		/*
		 * hi_arg > hi: true past 3 remaining instructions.
		 * hi_arg != hi (and not >): false past 2 remaining.
		 * Equal high words fall through to the |lo| compare.
		 */
		curr_block +=
		    bpf_comp_jgt32(curr_block, hi, SKIPN(3) + jt, NEXT);
		curr_block +=
		    bpf_comp_jeq32(curr_block, hi, NEXT, SKIPN(2) + jf);
	}
	set_bpf_stmt(curr_block++, BPF_LD + BPF_MEM, 0); /* swap in |lo| */
	curr_block += bpf_comp_jge32(curr_block, lo, jt, jf);

	return curr_block - filter;
}
    162 #endif
    163 
    164 /* Size-aware bitwise AND. */
    165 size_t bpf_comp_jset32(struct sock_filter *filter, unsigned long mask,
    166 		       unsigned char jt, unsigned char jf)
    167 {
    168 	unsigned int mask_lo = (unsigned int)(mask & 0xFFFFFFFF);
    169 	set_bpf_jump(filter, BPF_JMP + BPF_JSET + BPF_K, mask_lo, jt, jf);
    170 	return 1U;
    171 }
    172 
    173 /*
    174  * On 64 bits, we have to do two 32-bit bitwise ANDs.
    175  * We jump true when *either* bitwise AND is true (non-zero).
    176  */
    177 #if defined(BITS64)
    178 size_t bpf_comp_jset64(struct sock_filter *filter, uint64_t mask,
    179 		       unsigned char jt, unsigned char jf)
    180 {
    181 	unsigned int mask_lo = (unsigned int)(mask & 0xFFFFFFFF);
    182 	unsigned int mask_hi = (unsigned int)(mask >> 32);
    183 
    184 	struct sock_filter *curr_block = filter;
    185 
    186 	/* bpf_load_arg leaves |hi| in A */
    187 	curr_block += bpf_comp_jset32(curr_block, mask_hi, SKIPN(2) + jt, NEXT);
    188 	set_bpf_stmt(curr_block++, BPF_LD + BPF_MEM, 0); /* swap in |lo| */
    189 	curr_block += bpf_comp_jset32(curr_block, mask_lo, jt, jf);
    190 
    191 	return curr_block - filter;
    192 }
    193 #endif
    194 
    195 size_t bpf_comp_jin(struct sock_filter *filter, unsigned long mask,
    196 		    unsigned char jt, unsigned char jf)
    197 {
    198 	unsigned long negative_mask = ~mask;
    199 	/*
    200 	 * The mask is negated, so the comparison will be true when the argument
    201 	 * includes a flag that wasn't listed in the original (non-negated)
    202 	 * mask. This would be the failure case, so we switch |jt| and |jf|.
    203 	 */
    204 	return bpf_comp_jset(filter, negative_mask, jf, jt);
    205 }
    206 
    207 static size_t bpf_arg_comp_len(int op, unsigned long c attribute_unused)
    208 {
    209 	/* The comparisons that use gt/ge internally may have extra opcodes. */
    210 	switch (op) {
    211 	case LT:
    212 	case LE:
    213 	case GT:
    214 	case GE:
    215 #if defined(BITS64)
    216 		/*
    217 		 * |c| can only have a high 32-bit part when running on 64 bits.
    218 		 */
    219 		if ((c >> 32) == 0)
    220 			return BPF_ARG_SHORT_GT_GE_COMP_LEN + 1;
    221 #endif
    222 		return BPF_ARG_GT_GE_COMP_LEN + 1;
    223 	default:
    224 		return BPF_ARG_COMP_LEN + 1;
    225 	}
    226 }
    227 
    228 size_t bpf_arg_comp(struct sock_filter **pfilter, int op, int argidx,
    229 		    unsigned long c, unsigned int label_id)
    230 {
    231 	size_t filter_len = bpf_arg_comp_len(op, c);
    232 	struct sock_filter *filter =
    233 	    calloc(filter_len, sizeof(struct sock_filter));
    234 	struct sock_filter *curr_block = filter;
    235 	size_t (*comp_function)(struct sock_filter * filter, unsigned long k,
    236 				unsigned char jt, unsigned char jf);
    237 	int flip = 0;
    238 
    239 	/* Load arg */
    240 	curr_block += bpf_load_arg(curr_block, argidx);
    241 
    242 	/* Jump type */
    243 	switch (op) {
    244 	case EQ:
    245 		comp_function = bpf_comp_jeq;
    246 		flip = 0;
    247 		break;
    248 	case NE:
    249 		comp_function = bpf_comp_jeq;
    250 		flip = 1;
    251 		break;
    252 	case LT:
    253 		comp_function = bpf_comp_jge;
    254 		flip = 1;
    255 		break;
    256 	case LE:
    257 		comp_function = bpf_comp_jgt;
    258 		flip = 1;
    259 		break;
    260 	case GT:
    261 		comp_function = bpf_comp_jgt;
    262 		flip = 0;
    263 		break;
    264 	case GE:
    265 		comp_function = bpf_comp_jge;
    266 		flip = 0;
    267 		break;
    268 	case SET:
    269 		comp_function = bpf_comp_jset;
    270 		flip = 0;
    271 		break;
    272 	case IN:
    273 		comp_function = bpf_comp_jin;
    274 		flip = 0;
    275 		break;
    276 	default:
    277 		*pfilter = NULL;
    278 		return 0;
    279 	}
    280 
    281 	/*
    282 	 * It's easier for the rest of the code to have the true branch
    283 	 * skip and the false branch fall through.
    284 	 */
    285 	unsigned char jt = flip ? NEXT : SKIP;
    286 	unsigned char jf = flip ? SKIP : NEXT;
    287 	curr_block += comp_function(curr_block, c, jt, jf);
    288 	curr_block += set_bpf_jump_lbl(curr_block, label_id);
    289 
    290 	*pfilter = filter;
    291 	return curr_block - filter;
    292 }
    293 
    294 int bpf_resolve_jumps(struct bpf_labels *labels, struct sock_filter *filter,
    295 		      size_t len)
    296 {
    297 	struct sock_filter *instr;
    298 	size_t i, offset;
    299 
    300 	if (len > BPF_MAXINSNS)
    301 		return -1;
    302 
    303 	/*
    304 	 * Walk it once, backwards, to build the label table and do fixups.
    305 	 * Since backward jumps are disallowed by BPF, this is easy.
    306 	 */
    307 	for (i = 0; i < len; i++) {
    308 		offset = len - i - 1;
    309 		instr = &filter[offset];
    310 		if (instr->code != (BPF_JMP + BPF_JA))
    311 			continue;
    312 		switch ((instr->jt << 8) | instr->jf) {
    313 		case (JUMP_JT << 8) | JUMP_JF:
    314 			if (instr->k >= labels->count) {
    315 				warn("nonexistent label id: %u", instr->k);
    316 				return -1;
    317 			}
    318 			if (labels->labels[instr->k].location == 0xffffffff) {
    319 				warn("unresolved label: '%s'",
    320 				     labels->labels[instr->k].label);
    321 				return -1;
    322 			}
    323 			instr->k =
    324 			    labels->labels[instr->k].location - (offset + 1);
    325 			instr->jt = 0;
    326 			instr->jf = 0;
    327 			continue;
    328 		case (LABEL_JT << 8) | LABEL_JF:
    329 			if (labels->labels[instr->k].location != 0xffffffff) {
    330 				warn("duplicate label: '%s'",
    331 				     labels->labels[instr->k].label);
    332 				return -1;
    333 			}
    334 			labels->labels[instr->k].location = offset;
    335 			instr->k = 0; /* Fall through. */
    336 			instr->jt = 0;
    337 			instr->jf = 0;
    338 			continue;
    339 		}
    340 	}
    341 	return 0;
    342 }
    343 
    344 /* Simple lookup table for labels. */
    345 int bpf_label_id(struct bpf_labels *labels, const char *label)
    346 {
    347 	struct __bpf_label *begin = labels->labels, *end;
    348 	int id;
    349 	if (labels->count == 0) {
    350 		begin->label = strndup(label, MAX_BPF_LABEL_LEN);
    351 		if (!begin->label) {
    352 			return -1;
    353 		}
    354 		begin->location = 0xffffffff;
    355 		labels->count++;
    356 		return 0;
    357 	}
    358 	end = begin + labels->count;
    359 	for (id = 0; begin < end; ++begin, ++id) {
    360 		if (!strcmp(label, begin->label)) {
    361 			return id;
    362 		}
    363 	}
    364 
    365 	/* The label wasn't found. Insert it only if there's space. */
    366 	if (labels->count == BPF_LABELS_MAX) {
    367 		return -1;
    368 	}
    369 	begin->label = strndup(label, MAX_BPF_LABEL_LEN);
    370 	if (!begin->label) {
    371 		return -1;
    372 	}
    373 	begin->location = 0xffffffff;
    374 	labels->count++;
    375 	return id;
    376 }
    377 
    378 void free_label_strings(struct bpf_labels *labels)
    379 {
    380 	if (labels->count == 0)
    381 		return;
    382 
    383 	struct __bpf_label *begin = labels->labels, *end;
    384 
    385 	end = begin + labels->count;
    386 	for (; begin < end; ++begin) {
    387 		if (begin->label)
    388 			free((void *)(begin->label));
    389 	}
    390 
    391 	labels->count = 0;
    392 }
    393