/* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "bpf.h"

/* Architecture validation. */
size_t bpf_validate_arch(struct sock_filter *filter)
{
        struct sock_filter *curr_block = filter;
        set_bpf_stmt(curr_block++, BPF_LD+BPF_W+BPF_ABS, arch_nr);
        set_bpf_jump(curr_block++,
                     BPF_JMP+BPF_JEQ+BPF_K, ARCH_NR, SKIP, NEXT);
        set_bpf_ret_kill(curr_block++);
        return curr_block - filter;
}

/* Syscall number eval functions. */
size_t bpf_allow_syscall(struct sock_filter *filter, int nr)
{
        struct sock_filter *curr_block = filter;
        set_bpf_jump(curr_block++, BPF_JMP+BPF_JEQ+BPF_K, nr, NEXT, SKIP);
        set_bpf_stmt(curr_block++, BPF_RET+BPF_K, SECCOMP_RET_ALLOW);
        return curr_block - filter;
}

size_t bpf_allow_syscall_args(struct sock_filter *filter,
                              int nr, unsigned int id)
{
        struct sock_filter *curr_block = filter;
        set_bpf_jump(curr_block++, BPF_JMP+BPF_JEQ+BPF_K, nr, NEXT, SKIP);
        set_bpf_jump_lbl(curr_block++, id);
        return curr_block - filter;
}

/* Size-aware arg loaders. */
#if defined(BITS32)
size_t bpf_load_arg(struct sock_filter *filter, int argidx)
{
        set_bpf_stmt(filter, BPF_LD+BPF_W+BPF_ABS, LO_ARG(argidx));
        return 1U;
}
#elif defined(BITS64)
size_t bpf_load_arg(struct sock_filter *filter, int argidx)
{
        struct sock_filter *curr_block = filter;
        set_bpf_stmt(curr_block++, BPF_LD+BPF_W+BPF_ABS, LO_ARG(argidx));
        set_bpf_stmt(curr_block++, BPF_ST, 0);  /* lo -> M[0] */
        set_bpf_stmt(curr_block++, BPF_LD+BPF_W+BPF_ABS, HI_ARG(argidx));
        set_bpf_stmt(curr_block++, BPF_ST, 1);  /* hi -> M[1] */
        return curr_block - filter;
}
#endif

/* Size-aware equality comparison. */
size_t bpf_comp_jeq32(struct sock_filter *filter, unsigned long c,
                      unsigned char jt, unsigned char jf)
{
        unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
        set_bpf_jump(filter, BPF_JMP+BPF_JEQ+BPF_K, lo, jt, jf);
        return 1U;
}

/*
 * On 64 bits, we have to do two 32-bit comparisons.
 * We jump true when *both* comparisons are true.
 */
#if defined(BITS64)
size_t bpf_comp_jeq64(struct sock_filter *filter, uint64_t c,
                      unsigned char jt, unsigned char jf)
{
        unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
        unsigned int hi = (unsigned int)(c >> 32);

        struct sock_filter *curr_block = filter;

        /* bpf_load_arg leaves |hi| in A. */
        curr_block += bpf_comp_jeq32(curr_block, hi, NEXT, SKIPN(2) + jf);
        set_bpf_stmt(curr_block++, BPF_LD+BPF_MEM, 0);  /* swap in |lo| */
        curr_block += bpf_comp_jeq32(curr_block, lo, jt, jf);

        return curr_block - filter;
}
#endif
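/*
 * Illustrative sketch (not part of the library): prints the three
 * instructions bpf_comp_jeq64() emits for one 64-bit constant, so the
 * hi/lo split above can be inspected.  The BPF_EXAMPLE guard is a
 * hypothetical compile flag, not an existing build option, and the
 * emitted block is only meaningful at runtime after bpf_load_arg() has
 * left |hi| in A and |lo| in M[0].
 */
#if defined(BPF_EXAMPLE) && defined(BITS64)
static void example_dump_jeq64(void)
{
        struct sock_filter insns[3];
        size_t i, len;

        /* hi word must equal 0x1, then the lo word must equal 0x2. */
        len = bpf_comp_jeq64(insns, 0x100000002ULL, SKIP, NEXT);
        for (i = 0; i < len; i++)
                printf("{ code=%#x, jt=%u, jf=%u, k=%#x }\n",
                       insns[i].code, insns[i].jt, insns[i].jf, insns[i].k);
}
#endif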
/* Size-aware bitwise AND. */
size_t bpf_comp_jset32(struct sock_filter *filter, unsigned long mask,
                       unsigned char jt, unsigned char jf)
{
        unsigned int mask_lo = (unsigned int)(mask & 0xFFFFFFFF);
        set_bpf_jump(filter, BPF_JMP+BPF_JSET+BPF_K, mask_lo, jt, jf);
        return 1U;
}

/*
 * On 64 bits, we have to do two 32-bit bitwise ANDs.
 * We jump true when *either* bitwise AND is true (non-zero).
 */
#if defined(BITS64)
size_t bpf_comp_jset64(struct sock_filter *filter, uint64_t mask,
                       unsigned char jt, unsigned char jf)
{
        unsigned int mask_lo = (unsigned int)(mask & 0xFFFFFFFF);
        unsigned int mask_hi = (unsigned int)(mask >> 32);

        struct sock_filter *curr_block = filter;

        /* bpf_load_arg leaves |hi| in A. */
        curr_block += bpf_comp_jset32(curr_block, mask_hi, SKIPN(2) + jt, NEXT);
        set_bpf_stmt(curr_block++, BPF_LD+BPF_MEM, 0);  /* swap in |lo| */
        curr_block += bpf_comp_jset32(curr_block, mask_lo, jt, jf);

        return curr_block - filter;
}
#endif

size_t bpf_arg_comp(struct sock_filter **pfilter,
                    int op, int argidx, unsigned long c, unsigned int label_id)
{
        struct sock_filter *filter = calloc(BPF_ARG_COMP_LEN + 1,
                                            sizeof(struct sock_filter));
        struct sock_filter *curr_block = filter;
        size_t (*comp_function)(struct sock_filter *filter, unsigned long k,
                                unsigned char jt, unsigned char jf);
        int flip = 0;

        if (!filter) {
                *pfilter = NULL;
                return 0;
        }

        /* Load arg. */
        curr_block += bpf_load_arg(curr_block, argidx);

        /* Jump type. */
        switch (op) {
        case EQ:
                comp_function = bpf_comp_jeq;
                flip = 0;
                break;
        case NE:
                comp_function = bpf_comp_jeq;
                flip = 1;
                break;
        case SET:
                comp_function = bpf_comp_jset;
                flip = 0;
                break;
        default:
                free(filter);   /* don't leak the block on a bad op */
                *pfilter = NULL;
                return 0;
        }

        /*
         * It's easier for the rest of the code to have the true branch
         * skip and the false branch fall through.
         */
        unsigned char jt = flip ? NEXT : SKIP;
        unsigned char jf = flip ? SKIP : NEXT;
        curr_block += comp_function(curr_block, c, jt, jf);
        curr_block += set_bpf_jump_lbl(curr_block, label_id);

        *pfilter = filter;
        return curr_block - filter;
}
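/*
 * Illustrative sketch (not part of the library): how a policy compiler
 * might use bpf_arg_comp().  The "deny_arg0" label name and the
 * BPF_EXAMPLE guard are hypothetical, for this example only.  On
 * success the comparison block is heap-allocated, so the caller owns
 * *pfilter and must free() it after splicing it into the final program.
 */
#ifdef BPF_EXAMPLE
static size_t example_arg_comp(struct sock_filter **pfilter,
                               struct bpf_labels *labels)
{
        /* Jump to "deny_arg0" when syscall argument 0 != 0. */
        int id = bpf_label_id(labels, "deny_arg0");
        if (id < 0)
                return 0;
        return bpf_arg_comp(pfilter, NE, 0, 0, id);
}
#endif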
void dump_bpf_filter(struct sock_filter *filter, unsigned short len)
{
        int i = 0;

        printf("len == %d\n", len);
        printf("filter:\n");
        for (i = 0; i < len; i++) {
                printf("%d: \t{ code=%#x, jt=%u, jf=%u, k=%#x \t}\n",
                       i, filter[i].code, filter[i].jt, filter[i].jf,
                       filter[i].k);
        }
}

void dump_bpf_prog(struct sock_fprog *fprog)
{
        struct sock_filter *filter = fprog->filter;
        unsigned short len = fprog->len;
        dump_bpf_filter(filter, len);
}

int bpf_resolve_jumps(struct bpf_labels *labels,
                      struct sock_filter *filter, size_t count)
{
        struct sock_filter *begin = filter;
        __u8 insn = count - 1;

        if (count < 1)
                return -1;
        /*
         * Walk it once, backwards, to build the label table and do fixups.
         * Since backward jumps are disallowed by BPF, this is easy.
         * Note that the __u8 cursor limits this walk to filters of at
         * most 256 instructions.
         */
        for (filter += insn; filter >= begin; --insn, --filter) {
                if (filter->code != (BPF_JMP+BPF_JA))
                        continue;
                switch ((filter->jt<<8)|filter->jf) {
                case (JUMP_JT<<8)|JUMP_JF:
                        if (labels->labels[filter->k].location == 0xffffffff) {
                                fprintf(stderr, "Unresolved label: '%s'\n",
                                        labels->labels[filter->k].label);
                                return 1;
                        }
                        filter->k = labels->labels[filter->k].location -
                                    (insn + 1);
                        filter->jt = 0;
                        filter->jf = 0;
                        continue;
                case (LABEL_JT<<8)|LABEL_JF:
                        if (labels->labels[filter->k].location != 0xffffffff) {
                                fprintf(stderr, "Duplicate label use: '%s'\n",
                                        labels->labels[filter->k].label);
                                return 1;
                        }
                        labels->labels[filter->k].location = insn;
                        filter->k = 0;  /* fall through */
                        filter->jt = 0;
                        filter->jf = 0;
                        continue;
                }
        }
        return 0;
}

/* Simple lookup table for labels. */
int bpf_label_id(struct bpf_labels *labels, const char *label)
{
        struct __bpf_label *begin = labels->labels, *end;
        int id;
        if (labels->count == 0) {
                begin->label = strndup(label, MAX_BPF_LABEL_LEN);
                if (!begin->label) {
                        return -1;
                }
                begin->location = 0xffffffff;
                labels->count++;
                return 0;
        }
        end = begin + labels->count;
        for (id = 0; begin < end; ++begin, ++id) {
                if (!strcmp(label, begin->label))
                        return id;
        }
        /* The table is full; don't write past the end of labels->labels. */
        if (labels->count == BPF_LABELS_MAX)
                return -1;
        begin->label = strndup(label, MAX_BPF_LABEL_LEN);
        if (!begin->label) {
                return -1;
        }
        begin->location = 0xffffffff;
        labels->count++;
        return id;
}

/* Free label strings. */
void free_label_strings(struct bpf_labels *labels)
{
        if (labels->count == 0)
                return;

        struct __bpf_label *begin = labels->labels, *end;

        end = begin + labels->count;
        for (; begin < end; ++begin) {
                if (begin->label) {
                        free((void *)(begin->label));
                        begin->label = NULL;    /* guard against double-free */
                }
        }
}
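/*
 * Illustrative sketch (not part of the library): the label-table
 * lifecycle.  BPF_EXAMPLE is a hypothetical guard, and the initializer
 * relies only on the |count| member seen above (everything else is
 * zero-initialized).  Repeated lookups of the same string return the
 * same id, and free_label_strings() releases the strndup'd names once
 * the filter has been resolved.
 */
#ifdef BPF_EXAMPLE
static void example_label_lifecycle(void)
{
        struct bpf_labels labels = { .count = 0 };
        int deny = bpf_label_id(&labels, "deny");
        int allow = bpf_label_id(&labels, "allow");
        int again = bpf_label_id(&labels, "deny");      /* == deny */

        printf("deny=%d allow=%d again=%d\n", deny, allow, again);
        free_label_strings(&labels);
}
#endif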