Home | History | Annotate | Download | only in seccomp-bpf
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "sandbox/linux/seccomp-bpf/verifier.h"
      6 
      7 #include <string.h>
      8 
      9 #include <limits>
     10 
     11 #include "sandbox/linux/seccomp-bpf/linux_seccomp.h"
     12 #include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
     13 #include "sandbox/linux/seccomp-bpf/sandbox_bpf_policy.h"
     14 #include "sandbox/linux/seccomp-bpf/syscall_iterator.h"
     15 
     16 namespace sandbox {
     17 
     18 namespace {
     19 
     20 const uint64_t kLower32Bits = std::numeric_limits<uint32_t>::max();
     21 const uint64_t kUpper32Bits = static_cast<uint64_t>(kLower32Bits) << 32;
     22 const uint64_t kFull64Bits = std::numeric_limits<uint64_t>::max();
     23 
// Rolling machine state used by Verifier::EvaluateBPF() while it interprets
// a BPF program one instruction at a time.
struct State {
  State(const std::vector<struct sock_filter>& p,
        const struct arch_seccomp_data& d)
      : program(p), data(d), ip(0), accumulator(0), acc_is_valid(false) {}
  const std::vector<struct sock_filter>& program;  // Program being interpreted.
  const struct arch_seccomp_data& data;  // Input to the simulated filter.
  unsigned int ip;       // Index of the instruction currently being executed.
  uint32_t accumulator;  // The BPF accumulator register.
  bool acc_is_valid;     // True once a BPF_LD has initialized the accumulator.

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(State);
};
     37 
     38 uint32_t EvaluateErrorCode(SandboxBPF* sandbox,
     39                            const ErrorCode& code,
     40                            const struct arch_seccomp_data& data) {
     41   if (code.error_type() == ErrorCode::ET_SIMPLE ||
     42       code.error_type() == ErrorCode::ET_TRAP) {
     43     return code.err();
     44   } else if (code.error_type() == ErrorCode::ET_COND) {
     45     if (code.width() == ErrorCode::TP_32BIT &&
     46         (data.args[code.argno()] >> 32) &&
     47         (data.args[code.argno()] & 0xFFFFFFFF80000000ull) !=
     48             0xFFFFFFFF80000000ull) {
     49       return sandbox->Unexpected64bitArgument().err();
     50     }
     51     bool equal = (data.args[code.argno()] & code.mask()) == code.value();
     52     return EvaluateErrorCode(
     53         sandbox, equal ? *code.passed() : *code.failed(), data);
     54   } else {
     55     return SECCOMP_RET_INVALID;
     56   }
     57 }
     58 
// Recursively verifies that "program" computes the same results as the
// ErrorCode decision tree rooted at "root_code". "code" is the subtree
// currently being walked; "data" is scratch space whose args[] are mutated
// to synthesize test inputs that exercise each branch of "code". On any
// mismatch, sets *err to a static description and returns false.
bool VerifyErrorCode(SandboxBPF* sandbox,
                     const std::vector<struct sock_filter>& program,
                     struct arch_seccomp_data* data,
                     const ErrorCode& root_code,
                     const ErrorCode& code,
                     const char** err) {
  if (code.error_type() == ErrorCode::ET_SIMPLE ||
      code.error_type() == ErrorCode::ET_TRAP) {
    // Leaf node: interpret the program on the current test input and compare
    // against the value expected from the full decision tree.
    uint32_t computed_ret = Verifier::EvaluateBPF(program, *data, err);
    if (*err) {
      return false;
    } else if (computed_ret != EvaluateErrorCode(sandbox, root_code, *data)) {
      // For efficiency's sake, we'd much rather compare "computed_ret"
      // against "code.err()". This works most of the time, but it doesn't
      // always work for nested conditional expressions. The test values
      // that we generate on the fly to probe expressions can trigger
      // code flow decisions in multiple nodes of the decision tree, and the
      // only way to compute the correct error code in that situation is by
      // calling EvaluateErrorCode().
      *err = "Exit code from BPF program doesn't match";
      return false;
    }
  } else if (code.error_type() == ErrorCode::ET_COND) {
    // Seccomp exposes exactly six system call arguments (args[0..5]).
    if (code.argno() < 0 || code.argno() >= 6) {
      *err = "Invalid argument number in error code";
      return false;
    }

    // TODO(mdempsky): The test values generated here try to provide good
    // coverage for generated BPF instructions while avoiding combinatorial
    // explosion on large policies. Ideally we would instead take a fuzzing-like
    // approach and generate a bounded number of test cases regardless of policy
    // size.

    // Verify that we can check a value for simple equality.
    data->args[code.argno()] = code.value();
    if (!VerifyErrorCode(
            sandbox, program, data, root_code, *code.passed(), err)) {
      return false;
    }

    // If mask ignores any bits, verify that setting those bits is still
    // detected as equality.
    uint64_t ignored_bits = ~code.mask();
    if (code.width() == ErrorCode::TP_32BIT) {
      // For 32-bit comparisons only the low word of the mask is meaningful.
      ignored_bits = static_cast<uint32_t>(ignored_bits);
    }
    if ((ignored_bits & kLower32Bits) != 0) {
      data->args[code.argno()] = code.value() | (ignored_bits & kLower32Bits);
      if (!VerifyErrorCode(
              sandbox, program, data, root_code, *code.passed(), err)) {
        return false;
      }
    }
    if ((ignored_bits & kUpper32Bits) != 0) {
      data->args[code.argno()] = code.value() | (ignored_bits & kUpper32Bits);
      if (!VerifyErrorCode(
              sandbox, program, data, root_code, *code.passed(), err)) {
        return false;
      }
    }

    // Verify that changing bits included in the mask is detected as inequality.
    if ((code.mask() & kLower32Bits) != 0) {
      data->args[code.argno()] = code.value() ^ (code.mask() & kLower32Bits);
      if (!VerifyErrorCode(
              sandbox, program, data, root_code, *code.failed(), err)) {
        return false;
      }
    }
    if ((code.mask() & kUpper32Bits) != 0) {
      data->args[code.argno()] = code.value() ^ (code.mask() & kUpper32Bits);
      if (!VerifyErrorCode(
              sandbox, program, data, root_code, *code.failed(), err)) {
        return false;
      }
    }

    if (code.width() == ErrorCode::TP_32BIT) {
      // For 32-bit system call arguments, we emit additional instructions to
      // validate the upper 32-bits. Here we test that validation.

      // Arbitrary 64-bit values should be rejected.
      data->args[code.argno()] = 1ULL << 32;
      if (!VerifyErrorCode(sandbox,
                           program,
                           data,
                           root_code,
                           sandbox->Unexpected64bitArgument(),
                           err)) {
        return false;
      }

      // Upper 32-bits set without the MSB of the lower 32-bits set should be
      // rejected too.
      data->args[code.argno()] = kUpper32Bits;
      if (!VerifyErrorCode(sandbox,
                           program,
                           data,
                           root_code,
                           sandbox->Unexpected64bitArgument(),
                           err)) {
        return false;
      }
    }
  } else {
    *err = "Attempting to return invalid error code from BPF program";
    return false;
  }
  return true;
}
    170 
    171 void Ld(State* state, const struct sock_filter& insn, const char** err) {
    172   if (BPF_SIZE(insn.code) != BPF_W || BPF_MODE(insn.code) != BPF_ABS) {
    173     *err = "Invalid BPF_LD instruction";
    174     return;
    175   }
    176   if (insn.k < sizeof(struct arch_seccomp_data) && (insn.k & 3) == 0) {
    177     // We only allow loading of properly aligned 32bit quantities.
    178     memcpy(&state->accumulator,
    179            reinterpret_cast<const char*>(&state->data) + insn.k,
    180            4);
    181   } else {
    182     *err = "Invalid operand in BPF_LD instruction";
    183     return;
    184   }
    185   state->acc_is_valid = true;
    186   return;
    187 }
    188 
    189 void Jmp(State* state, const struct sock_filter& insn, const char** err) {
    190   if (BPF_OP(insn.code) == BPF_JA) {
    191     if (state->ip + insn.k + 1 >= state->program.size() ||
    192         state->ip + insn.k + 1 <= state->ip) {
    193     compilation_failure:
    194       *err = "Invalid BPF_JMP instruction";
    195       return;
    196     }
    197     state->ip += insn.k;
    198   } else {
    199     if (BPF_SRC(insn.code) != BPF_K || !state->acc_is_valid ||
    200         state->ip + insn.jt + 1 >= state->program.size() ||
    201         state->ip + insn.jf + 1 >= state->program.size()) {
    202       goto compilation_failure;
    203     }
    204     switch (BPF_OP(insn.code)) {
    205       case BPF_JEQ:
    206         if (state->accumulator == insn.k) {
    207           state->ip += insn.jt;
    208         } else {
    209           state->ip += insn.jf;
    210         }
    211         break;
    212       case BPF_JGT:
    213         if (state->accumulator > insn.k) {
    214           state->ip += insn.jt;
    215         } else {
    216           state->ip += insn.jf;
    217         }
    218         break;
    219       case BPF_JGE:
    220         if (state->accumulator >= insn.k) {
    221           state->ip += insn.jt;
    222         } else {
    223           state->ip += insn.jf;
    224         }
    225         break;
    226       case BPF_JSET:
    227         if (state->accumulator & insn.k) {
    228           state->ip += insn.jt;
    229         } else {
    230           state->ip += insn.jf;
    231         }
    232         break;
    233       default:
    234         goto compilation_failure;
    235     }
    236   }
    237 }
    238 
    239 uint32_t Ret(State*, const struct sock_filter& insn, const char** err) {
    240   if (BPF_SRC(insn.code) != BPF_K) {
    241     *err = "Invalid BPF_RET instruction";
    242     return 0;
    243   }
    244   return insn.k;
    245 }
    246 
    247 void Alu(State* state, const struct sock_filter& insn, const char** err) {
    248   if (BPF_OP(insn.code) == BPF_NEG) {
    249     state->accumulator = -state->accumulator;
    250     return;
    251   } else {
    252     if (BPF_SRC(insn.code) != BPF_K) {
    253       *err = "Unexpected source operand in arithmetic operation";
    254       return;
    255     }
    256     switch (BPF_OP(insn.code)) {
    257       case BPF_ADD:
    258         state->accumulator += insn.k;
    259         break;
    260       case BPF_SUB:
    261         state->accumulator -= insn.k;
    262         break;
    263       case BPF_MUL:
    264         state->accumulator *= insn.k;
    265         break;
    266       case BPF_DIV:
    267         if (!insn.k) {
    268           *err = "Illegal division by zero";
    269           break;
    270         }
    271         state->accumulator /= insn.k;
    272         break;
    273       case BPF_MOD:
    274         if (!insn.k) {
    275           *err = "Illegal division by zero";
    276           break;
    277         }
    278         state->accumulator %= insn.k;
    279         break;
    280       case BPF_OR:
    281         state->accumulator |= insn.k;
    282         break;
    283       case BPF_XOR:
    284         state->accumulator ^= insn.k;
    285         break;
    286       case BPF_AND:
    287         state->accumulator &= insn.k;
    288         break;
    289       case BPF_LSH:
    290         if (insn.k > 32) {
    291           *err = "Illegal shift operation";
    292           break;
    293         }
    294         state->accumulator <<= insn.k;
    295         break;
    296       case BPF_RSH:
    297         if (insn.k > 32) {
    298           *err = "Illegal shift operation";
    299           break;
    300         }
    301         state->accumulator >>= insn.k;
    302         break;
    303       default:
    304         *err = "Invalid operator in arithmetic operation";
    305         break;
    306     }
    307   }
    308 }
    309 
    310 }  // namespace
    311 
// Checks that the compiled "program" faithfully implements "policy" by
// interpreting it for every system call number (including probe values just
// outside the valid range) and comparing each result against the policy's
// evaluator. Returns false and sets *err on the first mismatch.
bool Verifier::VerifyBPF(SandboxBPF* sandbox,
                         const std::vector<struct sock_filter>& program,
                         const SandboxBPFPolicy& policy,
                         const char** err) {
  *err = NULL;
  for (SyscallIterator iter(false); !iter.Done();) {
    uint32_t sysnum = iter.Next();
    // We ideally want to iterate over the full system call range and values
    // just above and just below this range. This gives us the full result set
    // of the "evaluators".
    // On Intel systems, this can fail in a surprising way, as a cleared bit 30
    // indicates either i386 or x86-64; and a set bit 30 indicates x32. And
    // unless we pay attention to setting this bit correctly, an early check in
    // our BPF program will make us fail with a misleading error code.
    struct arch_seccomp_data data = {static_cast<int>(sysnum),
                                     static_cast<uint32_t>(SECCOMP_ARCH)};
#if defined(__i386__) || defined(__x86_64__)
#if defined(__x86_64__) && defined(__ILP32__)
    // Building for x32: only syscall numbers with bit 30 set belong to us.
    if (!(sysnum & 0x40000000u)) {
      continue;
    }
#else
    // Building for i386/x86-64: skip x32 syscall numbers (bit 30 set).
    if (sysnum & 0x40000000u) {
      continue;
    }
#endif
#endif
    // Out-of-range probe values get the policy's invalid-syscall handler.
    ErrorCode code = iter.IsValid(sysnum)
                         ? policy.EvaluateSyscall(sandbox, sysnum)
                         : policy.InvalidSyscall(sandbox);
    if (!VerifyErrorCode(sandbox, program, &data, code, code, err)) {
      return false;
    }
  }
  return true;
}
    348 
    349 uint32_t Verifier::EvaluateBPF(const std::vector<struct sock_filter>& program,
    350                                const struct arch_seccomp_data& data,
    351                                const char** err) {
    352   *err = NULL;
    353   if (program.size() < 1 || program.size() >= SECCOMP_MAX_PROGRAM_SIZE) {
    354     *err = "Invalid program length";
    355     return 0;
    356   }
    357   for (State state(program, data); !*err; ++state.ip) {
    358     if (state.ip >= program.size()) {
    359       *err = "Invalid instruction pointer in BPF program";
    360       break;
    361     }
    362     const struct sock_filter& insn = program[state.ip];
    363     switch (BPF_CLASS(insn.code)) {
    364       case BPF_LD:
    365         Ld(&state, insn, err);
    366         break;
    367       case BPF_JMP:
    368         Jmp(&state, insn, err);
    369         break;
    370       case BPF_RET: {
    371         uint32_t r = Ret(&state, insn, err);
    372         switch (r & SECCOMP_RET_ACTION) {
    373           case SECCOMP_RET_TRAP:
    374           case SECCOMP_RET_ERRNO:
    375           case SECCOMP_RET_TRACE:
    376           case SECCOMP_RET_ALLOW:
    377             break;
    378           case SECCOMP_RET_KILL:     // We don't ever generate this
    379           case SECCOMP_RET_INVALID:  // Should never show up in BPF program
    380           default:
    381             *err = "Unexpected return code found in BPF program";
    382             return 0;
    383         }
    384         return r;
    385       }
    386       case BPF_ALU:
    387         Alu(&state, insn, err);
    388         break;
    389       default:
    390         *err = "Unexpected instruction in BPF program";
    391         break;
    392     }
    393   }
    394   return 0;
    395 }
    396 
    397 }  // namespace sandbox
    398