// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"

// Some headers on Android are missing cdefs: crbug.com/172337.
// (We can't use OS_ANDROID here since build_config.h is not included).
#if defined(ANDROID)
#include <sys/cdefs.h>
#endif

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/posix/eintr_wrapper.h"
#include "sandbox/linux/seccomp-bpf/codegen.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf_policy.h"
#include "sandbox/linux/seccomp-bpf/syscall.h"
#include "sandbox/linux/seccomp-bpf/syscall_iterator.h"
#include "sandbox/linux/seccomp-bpf/verifier.h"

namespace sandbox {

namespace {

const int kExpectedExitCode = 100;

int popcount(uint32_t x) {
  return __builtin_popcount(x);
}

#if !defined(NDEBUG)
void WriteFailedStderrSetupMessage(int out_fd) {
  const char* error_string = strerror(errno);
  static const char msg[] =
      "You have reproduced a puzzling issue.\n"
      "Please, report to crbug.com/152530!\n"
      "Failed to set up stderr: ";
  if (HANDLE_EINTR(write(out_fd, msg, sizeof(msg) - 1)) > 0 && error_string &&
      HANDLE_EINTR(write(out_fd, error_string, strlen(error_string))) > 0 &&
      HANDLE_EINTR(write(out_fd, "\n", 1))) {
  }
}
#endif  // !defined(NDEBUG)

// We define a really simple sandbox policy. It is just good enough for us
// to tell that the sandbox has actually been activated.
ErrorCode ProbeEvaluator(SandboxBPF*, int sysnum, void*) __attribute__((const));
ErrorCode ProbeEvaluator(SandboxBPF*, int sysnum, void*) {
  switch (sysnum) {
    case __NR_getpid:
      // Return EPERM so that we can check that the filter actually ran.
      return ErrorCode(EPERM);
    case __NR_exit_group:
      // Allow exit() with a non-default return code.
      return ErrorCode(ErrorCode::ERR_ALLOWED);
    default:
      // Make everything else fail in an easily recognizable way.
      return ErrorCode(EINVAL);
  }
}

void ProbeProcess(void) {
  if (syscall(__NR_getpid) < 0 && errno == EPERM) {
    syscall(__NR_exit_group, static_cast<intptr_t>(kExpectedExitCode));
  }
}

ErrorCode AllowAllEvaluator(SandboxBPF*, int sysnum, void*) {
  if (!SandboxBPF::IsValidSyscallNumber(sysnum)) {
    return ErrorCode(ENOSYS);
  }
  return ErrorCode(ErrorCode::ERR_ALLOWED);
}

void TryVsyscallProcess(void) {
  time_t current_time;
  // time() is implemented as a vsyscall. With an older glibc, with
  // vsyscall=emulate and some versions of the seccomp BPF patch
  // we may get SIGKILL-ed. Detect this!
  if (time(&current_time) != static_cast<time_t>(-1)) {
    syscall(__NR_exit_group, static_cast<intptr_t>(kExpectedExitCode));
  }
}

bool IsSingleThreaded(int proc_fd) {
  if (proc_fd < 0) {
    // Cannot determine whether program is single-threaded. Hope for
    // the best...
    return true;
  }

  struct stat sb;
  int task = -1;
  if ((task = openat(proc_fd, "self/task", O_RDONLY | O_DIRECTORY)) < 0 ||
      fstat(task, &sb) != 0 || sb.st_nlink != 3 || IGNORE_EINTR(close(task))) {
    if (task >= 0) {
      if (IGNORE_EINTR(close(task))) {
      }
    }
    return false;
  }
  return true;
}

bool IsDenied(const ErrorCode& code) {
  return (code.err() & SECCOMP_RET_ACTION) == SECCOMP_RET_TRAP ||
         (code.err() >= (SECCOMP_RET_ERRNO + ErrorCode::ERR_MIN_ERRNO) &&
          code.err() <= (SECCOMP_RET_ERRNO + ErrorCode::ERR_MAX_ERRNO));
}

// Function that can be passed as a callback function to CodeGen::Traverse().
// Checks whether the "insn" returns an UnsafeTrap() ErrorCode. If so, it
// sets the "bool" variable pointed to by "aux".
void CheckForUnsafeErrorCodes(Instruction* insn, void* aux) {
  bool* is_unsafe = static_cast<bool*>(aux);
  if (!*is_unsafe) {
    if (BPF_CLASS(insn->code) == BPF_RET && insn->k > SECCOMP_RET_TRAP &&
        insn->k - SECCOMP_RET_TRAP <= SECCOMP_RET_DATA) {
      const ErrorCode& err =
          Trap::ErrorCodeFromTrapId(insn->k & SECCOMP_RET_DATA);
      if (err.error_type() != ErrorCode::ET_INVALID && !err.safe()) {
        *is_unsafe = true;
      }
    }
  }
}

// A Trap() handler that returns an "errno" value. The value is encoded
// in the "aux" parameter.
intptr_t ReturnErrno(const struct arch_seccomp_data&, void* aux) {
  // TrapFnc functions report errors by following the native kernel convention
  // of returning an exit code in the range of -1..-4096. They do not try to
  // set errno themselves. The glibc wrapper that triggered the SIGSYS will
  // ultimately do so for us.
  int err = reinterpret_cast<intptr_t>(aux) & SECCOMP_RET_DATA;
  return -err;
}

// Function that can be passed as a callback function to CodeGen::Traverse().
// Checks whether the "insn" returns an errno value from a BPF filter. If so,
// it rewrites the instruction to instead call a Trap() handler that does
// the same thing. "aux" is ignored.
void RedirectToUserspace(Instruction* insn, void* aux) {
  // When inside an UnsafeTrap() callback, we want to allow all system calls.
  // This means, we must conditionally disable the sandbox -- and that's not
  // something that kernel-side BPF filters can do, as they cannot inspect
  // any state other than the syscall arguments.
  // But if we redirect all error handlers to user-space, then we can easily
  // make this decision.
  // The performance penalty for this extra round-trip to user-space is not
  // actually that bad, as we only ever pay it for denied system calls; and a
  // typical program has very few of these.
  SandboxBPF* sandbox = static_cast<SandboxBPF*>(aux);
  if (BPF_CLASS(insn->code) == BPF_RET &&
      (insn->k & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) {
    insn->k = sandbox->Trap(ReturnErrno,
        reinterpret_cast<void*>(insn->k & SECCOMP_RET_DATA)).err();
  }
}

// This wraps an existing policy and changes its behavior to match the changes
// made by RedirectToUserspace(). This is part of the framework that allows BPF
// evaluation in userland.
// TODO(markus): document the code inside better.
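// For example, if the wrapped policy returns ErrorCode(EPERM) -- i.e. a BPF
// return value of SECCOMP_RET_ERRNO | EPERM -- EvaluateSyscall() below swaps
// it for a Trap() whose aux payload is EPERM, so that ReturnErrno() reports
// -EPERM from user space, just as the in-kernel filter would have done.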
class RedirectToUserSpacePolicyWrapper : public SandboxBPFPolicy {
 public:
  explicit RedirectToUserSpacePolicyWrapper(
      const SandboxBPFPolicy* wrapped_policy)
      : wrapped_policy_(wrapped_policy) {
    DCHECK(wrapped_policy_);
  }

  virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox_compiler,
                                    int system_call_number) const OVERRIDE {
    ErrorCode err =
        wrapped_policy_->EvaluateSyscall(sandbox_compiler, system_call_number);
    if ((err.err() & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) {
      return sandbox_compiler->Trap(
          ReturnErrno, reinterpret_cast<void*>(err.err() & SECCOMP_RET_DATA));
    }
    return err;
  }

 private:
  const SandboxBPFPolicy* wrapped_policy_;
  DISALLOW_COPY_AND_ASSIGN(RedirectToUserSpacePolicyWrapper);
};

intptr_t BPFFailure(const struct arch_seccomp_data&, void* aux) {
  SANDBOX_DIE(static_cast<char*>(aux));
}

// This class allows compatibility with the old, deprecated SetSandboxPolicy.
class CompatibilityPolicy : public SandboxBPFPolicy {
 public:
  CompatibilityPolicy(SandboxBPF::EvaluateSyscall syscall_evaluator, void* aux)
      : syscall_evaluator_(syscall_evaluator), aux_(aux) {
    DCHECK(syscall_evaluator_);
  }

  virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox_compiler,
                                    int system_call_number) const OVERRIDE {
    return syscall_evaluator_(sandbox_compiler, system_call_number, aux_);
  }

 private:
  SandboxBPF::EvaluateSyscall syscall_evaluator_;
  void* aux_;
  DISALLOW_COPY_AND_ASSIGN(CompatibilityPolicy);
};

}  // namespace

SandboxBPF::SandboxBPF()
    : quiet_(false),
      proc_fd_(-1),
      conds_(new Conds),
      sandbox_has_started_(false) {}

SandboxBPF::~SandboxBPF() {
  // It is generally unsafe to call any memory allocator operations or to even
  // call arbitrary destructors after having installed a new policy. We just
  // have no way to tell whether this policy would allow the system calls that
  // the constructors can trigger.
  // So, we normally destroy all of our complex state prior to starting the
  // sandbox. But this won't happen, if the Sandbox object was created and
  // never actually used to set up a sandbox. So, just in case, we are
  // destroying any remaining state.
  // The "if ()" statements are technically superfluous. But let's be explicit
  // that we really don't want to run any code, when we already destroyed
  // objects before setting up the sandbox.
  if (conds_) {
    delete conds_;
  }
}

bool SandboxBPF::IsValidSyscallNumber(int sysnum) {
  return SyscallIterator::IsValid(sysnum);
}

bool SandboxBPF::RunFunctionInPolicy(void (*code_in_sandbox)(),
                                     EvaluateSyscall syscall_evaluator,
                                     void* aux) {
  // Block all signals before forking a child process. This prevents an
  // attacker from manipulating our test by sending us an unexpected signal.
  sigset_t old_mask, new_mask;
  if (sigfillset(&new_mask) || sigprocmask(SIG_BLOCK, &new_mask, &old_mask)) {
    SANDBOX_DIE("sigprocmask() failed");
  }
  int fds[2];
  if (pipe2(fds, O_NONBLOCK | O_CLOEXEC)) {
    SANDBOX_DIE("pipe() failed");
  }

  if (fds[0] <= 2 || fds[1] <= 2) {
    SANDBOX_DIE("Process started without standard file descriptors");
  }

  // This code is using fork() and should only ever run single-threaded.
  // Most of the code below is "async-signal-safe" and only minor changes
  // would be needed to support threads.
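  // In particular, fork() duplicates only the calling thread; any locks held
  // by other threads (e.g. inside malloc) would remain locked forever in the
  // child.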
  DCHECK(IsSingleThreaded(proc_fd_));
  pid_t pid = fork();
  if (pid < 0) {
    // Die if we cannot fork(). We would probably fail a little later
    // anyway, as the machine is likely very close to running out of
    // memory.
    // But what we don't want to do is return "false", as a crafty
    // attacker might cause fork() to fail at will and could trick us
    // into running without a sandbox.
    sigprocmask(SIG_SETMASK, &old_mask, NULL);  // OK, if it fails
    SANDBOX_DIE("fork() failed unexpectedly");
  }

  // In the child process
  if (!pid) {
    // Test a very simple sandbox policy to verify that we can
    // successfully turn on sandboxing.
    Die::EnableSimpleExit();

    errno = 0;
    if (IGNORE_EINTR(close(fds[0]))) {
      // This call to close() has been failing in strange ways. See
      // crbug.com/152530. So we only fail in debug mode now.
#if !defined(NDEBUG)
      WriteFailedStderrSetupMessage(fds[1]);
      SANDBOX_DIE(NULL);
#endif
    }
    if (HANDLE_EINTR(dup2(fds[1], 2)) != 2) {
      // Stderr could very well be a file descriptor to .xsession-errors, or
      // another file, which could be backed by a file system that could cause
      // dup2 to fail while trying to close stderr. It's important that we do
      // not fail on trying to close stderr.
      // If dup2 fails here, we will continue normally; this means that our
      // parent won't cause a fatal failure if something writes to stderr in
      // this child.
#if !defined(NDEBUG)
      // In DEBUG builds, we still want to get a report.
      WriteFailedStderrSetupMessage(fds[1]);
      SANDBOX_DIE(NULL);
#endif
    }
    if (IGNORE_EINTR(close(fds[1]))) {
      // This call to close() has been failing in strange ways. See
      // crbug.com/152530. So we only fail in debug mode now.
#if !defined(NDEBUG)
      WriteFailedStderrSetupMessage(fds[1]);
      SANDBOX_DIE(NULL);
#endif
    }

    SetSandboxPolicyDeprecated(syscall_evaluator, aux);
    StartSandbox();

    // Run our code in the sandbox.
    code_in_sandbox();

    // code_in_sandbox() is not supposed to return here.
    SANDBOX_DIE(NULL);
  }

  // In the parent process.
  if (IGNORE_EINTR(close(fds[1]))) {
    SANDBOX_DIE("close() failed");
  }
  if (sigprocmask(SIG_SETMASK, &old_mask, NULL)) {
    SANDBOX_DIE("sigprocmask() failed");
  }
  int status;
  if (HANDLE_EINTR(waitpid(pid, &status, 0)) != pid) {
    SANDBOX_DIE("waitpid() failed unexpectedly");
  }
  bool rc = WIFEXITED(status) && WEXITSTATUS(status) == kExpectedExitCode;

  // If we fail to support sandboxing, there might be an additional
  // error message. If so, this was an entirely unexpected and fatal
  // failure. We should report the failure and somebody must fix
  // things. This is probably a security-critical bug in the sandboxing
  // code.
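  // Any such message was written by the child to its stderr, which we
  // redirected to the write end of our pipe above, so it can be read back
  // from fds[0] here.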
  if (!rc) {
    char buf[4096];
    ssize_t len = HANDLE_EINTR(read(fds[0], buf, sizeof(buf) - 1));
    if (len > 0) {
      while (len > 1 && buf[len - 1] == '\n') {
        --len;
      }
      buf[len] = '\000';
      SANDBOX_DIE(buf);
    }
  }
  if (IGNORE_EINTR(close(fds[0]))) {
    SANDBOX_DIE("close() failed");
  }

  return rc;
}

bool SandboxBPF::KernelSupportSeccompBPF() {
  return RunFunctionInPolicy(ProbeProcess, ProbeEvaluator, 0) &&
         RunFunctionInPolicy(TryVsyscallProcess, AllowAllEvaluator, 0);
}

SandboxBPF::SandboxStatus SandboxBPF::SupportsSeccompSandbox(int proc_fd) {
  // If the sandbox is currently active, we clearly must have support for
  // sandboxing.
  if (status_ == STATUS_ENABLED) {
    return status_;
  }

  // Even if the sandbox was previously available, something might have
  // changed in our run-time environment. Check one more time.
  if (status_ == STATUS_AVAILABLE) {
    if (!IsSingleThreaded(proc_fd)) {
      status_ = STATUS_UNAVAILABLE;
    }
    return status_;
  }

  if (status_ == STATUS_UNAVAILABLE && IsSingleThreaded(proc_fd)) {
    // All state transitions resulting in STATUS_UNAVAILABLE are immediately
    // preceded by STATUS_AVAILABLE. Furthermore, these transitions all
    // happen, if and only if they are triggered by the process being multi-
    // threaded.
    // In other words, if a single-threaded process is currently in the
    // STATUS_UNAVAILABLE state, it is safe to assume that sandboxing is
    // actually available.
    status_ = STATUS_AVAILABLE;
    return status_;
  }

  // If we have not previously checked for availability of the sandbox or if
  // we otherwise don't believe we have a good cached value, we have to
  // perform a thorough check now.
  if (status_ == STATUS_UNKNOWN) {
    // We create our own private copy of a "Sandbox" object. This ensures that
    // the object does not have any policies configured that might interfere
    // with the tests done by "KernelSupportSeccompBPF()".
    SandboxBPF sandbox;

    // By setting "quiet_ = true" we suppress messages for expected and benign
    // failures (e.g. if the current kernel lacks support for BPF filters).
    sandbox.quiet_ = true;
    sandbox.set_proc_fd(proc_fd);
    status_ = sandbox.KernelSupportSeccompBPF() ? STATUS_AVAILABLE
                                                : STATUS_UNSUPPORTED;

    // As we are performing our tests from a child process, the run-time
    // environment that is visible to the sandbox is always guaranteed to be
    // single-threaded. Let's check here whether the caller is single-
    // threaded. Otherwise, we mark the sandbox as temporarily unavailable.
    if (status_ == STATUS_AVAILABLE && !IsSingleThreaded(proc_fd)) {
      status_ = STATUS_UNAVAILABLE;
    }
  }
  return status_;
}

void SandboxBPF::set_proc_fd(int proc_fd) { proc_fd_ = proc_fd; }

void SandboxBPF::StartSandbox() {
  if (status_ == STATUS_UNSUPPORTED || status_ == STATUS_UNAVAILABLE) {
    SANDBOX_DIE(
        "Trying to start sandbox, even though it is known to be "
        "unavailable");
  } else if (sandbox_has_started_ || !conds_) {
    SANDBOX_DIE(
        "Cannot repeatedly start sandbox. Create a separate Sandbox "
        "object instead.");
  }
  if (proc_fd_ < 0) {
    proc_fd_ = open("/proc", O_RDONLY | O_DIRECTORY);
  }
  if (proc_fd_ < 0) {
    // For now, continue in degraded mode, if we can't access /proc.
    // In the future, we might want to tighten this requirement.
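    // Note that IsSingleThreaded() below treats an invalid file descriptor as
    // "assume single-threaded", so without /proc the thread check is
    // effectively skipped.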
  }
  if (!IsSingleThreaded(proc_fd_)) {
    SANDBOX_DIE("Cannot start sandbox, if process is already multi-threaded");
  }

  // We no longer need access to any files in /proc. We want to do this
  // before installing the filters, just in case our policy denies
  // close().
  if (proc_fd_ >= 0) {
    if (IGNORE_EINTR(close(proc_fd_))) {
      SANDBOX_DIE("Failed to close file descriptor for /proc");
    }
    proc_fd_ = -1;
  }

  // Install the filters.
  InstallFilter();

  // We are now inside the sandbox.
  status_ = STATUS_ENABLED;
}

void SandboxBPF::PolicySanityChecks(SandboxBPFPolicy* policy) {
  for (SyscallIterator iter(true); !iter.Done();) {
    uint32_t sysnum = iter.Next();
    if (!IsDenied(policy->EvaluateSyscall(this, sysnum))) {
      SANDBOX_DIE(
          "Policies should deny system calls that are outside the "
          "expected range (typically MIN_SYSCALL..MAX_SYSCALL)");
    }
  }
  return;
}

// Deprecated API, supported with a wrapper to the new API.
void SandboxBPF::SetSandboxPolicyDeprecated(EvaluateSyscall syscall_evaluator,
                                            void* aux) {
  if (sandbox_has_started_ || !conds_) {
    SANDBOX_DIE("Cannot change policy after sandbox has started");
  }
  SetSandboxPolicy(new CompatibilityPolicy(syscall_evaluator, aux));
}

// Don't take a scoped_ptr here; polymorphism makes its use awkward.
void SandboxBPF::SetSandboxPolicy(SandboxBPFPolicy* policy) {
  DCHECK(!policy_);
  if (sandbox_has_started_ || !conds_) {
    SANDBOX_DIE("Cannot change policy after sandbox has started");
  }
  PolicySanityChecks(policy);
  policy_.reset(policy);
}

void SandboxBPF::InstallFilter() {
  // We want to be very careful in not imposing any requirements on the
  // policies that are set with SetSandboxPolicy(). This means, as soon as
  // the sandbox is active, we shouldn't be relying on libraries that could
  // be making system calls. This, for example, means we should avoid
  // using the heap and we should avoid using STL functions.
  // Temporarily copy the contents of the "program" vector into a
  // stack-allocated array; and then explicitly destroy that object.
  // This makes sure we don't explicitly or implicitly call new/delete after
  // we installed the BPF filter program in the kernel. Depending on the
  // system memory allocator that is in effect, these operators can result
  // in system calls to things like munmap() or brk().
  Program* program = AssembleFilter(false /* force_verification */);

  struct sock_filter bpf[program->size()];
  const struct sock_fprog prog = {static_cast<unsigned short>(program->size()),
                                  bpf};
  memcpy(bpf, &(*program)[0], sizeof(bpf));
  delete program;

  // Make an attempt to release memory that is no longer needed here, rather
  // than in the destructor. Try to avoid making assumptions about what will
  // still be possible in the new (sandboxed) execution environment.
  delete conds_;
  conds_ = NULL;
  policy_.reset();

  // Install BPF filter program
  if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
    SANDBOX_DIE(quiet_ ? NULL : "Kernel refuses to enable no-new-privs");
  } else {
    if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
      SANDBOX_DIE(quiet_ ? NULL : "Kernel refuses to turn on BPF filters");
    }
  }

  sandbox_has_started_ = true;

  return;
}

SandboxBPF::Program* SandboxBPF::AssembleFilter(bool force_verification) {
#if !defined(NDEBUG)
  force_verification = true;
#endif

  // Verify that the user pushed a policy.
  DCHECK(policy_);

  // Assemble the BPF filter program.
  CodeGen* gen = new CodeGen();
  if (!gen) {
    SANDBOX_DIE("Out of memory");
  }

  // If the architecture doesn't match SECCOMP_ARCH, disallow the
  // system call.
  Instruction* tail;
  Instruction* head = gen->MakeInstruction(
      BPF_LD + BPF_W + BPF_ABS,
      SECCOMP_ARCH_IDX,
      tail = gen->MakeInstruction(
          BPF_JMP + BPF_JEQ + BPF_K,
          SECCOMP_ARCH,
          NULL,
          gen->MakeInstruction(
              BPF_RET + BPF_K,
              Kill("Invalid audit architecture in BPF filter"))));

  bool has_unsafe_traps = false;
  {
    // Evaluate all possible system calls and group their ErrorCodes into
    // ranges of identical codes.
    Ranges ranges;
    FindRanges(&ranges);

    // Compile the system call ranges to an optimized BPF jumptable
    Instruction* jumptable =
        AssembleJumpTable(gen, ranges.begin(), ranges.end());

    // If there is at least one UnsafeTrap() in our program, the entire sandbox
    // is unsafe. We need to modify the program so that all non-
    // SECCOMP_RET_ALLOW ErrorCodes are handled in user-space. This will then
    // allow us to temporarily disable sandboxing rules inside of callbacks to
    // UnsafeTrap().
    gen->Traverse(jumptable, CheckForUnsafeErrorCodes, &has_unsafe_traps);

    // Grab the system call number, so that we can implement jump tables.
    Instruction* load_nr =
        gen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, SECCOMP_NR_IDX);

    // If our BPF program has unsafe jumps, enable support for them. This
    // test happens very early in the BPF filter program, even before we
    // consider looking at system call numbers.
    // As support for unsafe jumps essentially defeats all the security
    // measures that the sandbox provides, we print a big warning message --
    // and of course, we make sure to only ever enable this feature if it
    // is actually requested by the sandbox policy.
    if (has_unsafe_traps) {
      if (SandboxSyscall(-1) == -1 && errno == ENOSYS) {
        SANDBOX_DIE(
            "Support for UnsafeTrap() has not yet been ported to this "
            "architecture");
      }

      if (!policy_->EvaluateSyscall(this, __NR_rt_sigprocmask)
               .Equals(ErrorCode(ErrorCode::ERR_ALLOWED)) ||
          !policy_->EvaluateSyscall(this, __NR_rt_sigreturn)
               .Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
#if defined(__NR_sigprocmask)
          ||
          !policy_->EvaluateSyscall(this, __NR_sigprocmask)
               .Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
#endif
#if defined(__NR_sigreturn)
          ||
          !policy_->EvaluateSyscall(this, __NR_sigreturn)
               .Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
#endif
          ) {
        SANDBOX_DIE(
            "Invalid seccomp policy; if using UnsafeTrap(), you must "
            "unconditionally allow sigreturn() and sigprocmask()");
      }

      if (!Trap::EnableUnsafeTrapsInSigSysHandler()) {
        // We should never be able to get here, as UnsafeTrap() should never
        // actually return a valid ErrorCode object unless the user set the
        // CHROME_SANDBOX_DEBUGGING environment variable; and therefore,
        // "has_unsafe_traps" would always be false. But better double-check
        // than enabling dangerous code.
        SANDBOX_DIE("We'd rather die than enable unsafe traps");
      }
      gen->Traverse(jumptable, RedirectToUserspace, this);

      // Allow system calls, if they originate from our magic return address
      // (which we can query by calling SandboxSyscall(-1)).
      uintptr_t syscall_entry_point =
          static_cast<uintptr_t>(SandboxSyscall(-1));
      uint32_t low = static_cast<uint32_t>(syscall_entry_point);
#if __SIZEOF_POINTER__ > 4
      uint32_t hi = static_cast<uint32_t>(syscall_entry_point >> 32);
#endif

      // BPF cannot do native 64bit comparisons. On 64bit architectures, we
      // have to compare both 32bit halves of the instruction pointer. If they
      // match what we expect, we return ERR_ALLOWED. If either or both don't
      // match, we continue evaluating the rest of the sandbox policy.
      Instruction* escape_hatch = gen->MakeInstruction(
          BPF_LD + BPF_W + BPF_ABS,
          SECCOMP_IP_LSB_IDX,
          gen->MakeInstruction(
              BPF_JMP + BPF_JEQ + BPF_K,
              low,
#if __SIZEOF_POINTER__ > 4
              gen->MakeInstruction(
                  BPF_LD + BPF_W + BPF_ABS,
                  SECCOMP_IP_MSB_IDX,
                  gen->MakeInstruction(
                      BPF_JMP + BPF_JEQ + BPF_K,
                      hi,
#endif
                      gen->MakeInstruction(BPF_RET + BPF_K,
                                           ErrorCode(ErrorCode::ERR_ALLOWED)),
#if __SIZEOF_POINTER__ > 4
                      load_nr)),
#endif
              load_nr));
      gen->JoinInstructions(tail, escape_hatch);
    } else {
      gen->JoinInstructions(tail, load_nr);
    }
    tail = load_nr;

    // On Intel architectures, verify that system call numbers are in the
    // expected number range. The older i386 and x86-64 APIs clear bit 30
    // on all system calls. The newer x32 API always sets bit 30.
#if defined(__i386__) || defined(__x86_64__)
    Instruction* invalidX32 = gen->MakeInstruction(
        BPF_RET + BPF_K, Kill("Illegal mixing of system call ABIs").err_);
    Instruction* checkX32 =
#if defined(__x86_64__) && defined(__ILP32__)
        gen->MakeInstruction(
            BPF_JMP + BPF_JSET + BPF_K, 0x40000000, 0, invalidX32);
#else
        gen->MakeInstruction(
            BPF_JMP + BPF_JSET + BPF_K, 0x40000000, invalidX32, 0);
#endif
    gen->JoinInstructions(tail, checkX32);
    tail = checkX32;
#endif

    // Append jump table to our preamble
    gen->JoinInstructions(tail, jumptable);
  }

  // Turn the DAG into a vector of instructions.
  Program* program = new Program();
  gen->Compile(head, program);
  delete gen;

  // Make sure compilation resulted in a BPF program that executes
  // correctly. Otherwise, there is an internal error in our BPF compiler.
  // There is really nothing the caller can do until the bug is fixed.
  if (force_verification) {
    // Verification is expensive. We only perform this step, if we are
    // compiled in debug mode, or if the caller explicitly requested
    // verification.
    VerifyProgram(*program, has_unsafe_traps);
  }

  return program;
}

void SandboxBPF::VerifyProgram(const Program& program, bool has_unsafe_traps) {
  // If we previously rewrote the BPF program so that it calls user-space
  // whenever we return an "errno" value from the filter, then we have to
  // wrap our system call evaluator to perform the same operation. Otherwise,
  // the verifier would also report a mismatch in return codes.
  scoped_ptr<const RedirectToUserSpacePolicyWrapper> redirected_policy(
      new RedirectToUserSpacePolicyWrapper(policy_.get()));

  const char* err = NULL;
  if (!Verifier::VerifyBPF(this,
                           program,
                           has_unsafe_traps ? *redirected_policy : *policy_,
                           &err)) {
    CodeGen::PrintProgram(program);
    SANDBOX_DIE(err);
  }
}

void SandboxBPF::FindRanges(Ranges* ranges) {
  // Please note that "struct seccomp_data" defines system calls as a signed
  // int32_t, but BPF instructions always operate on unsigned quantities. We
  // deal with this disparity by enumerating from MIN_SYSCALL to MAX_SYSCALL,
  // and then verifying that the rest of the number range (both positive and
  // negative) all return the same ErrorCode.
  uint32_t old_sysnum = 0;
  ErrorCode old_err = policy_->EvaluateSyscall(this, old_sysnum);
  ErrorCode invalid_err = policy_->EvaluateSyscall(this, MIN_SYSCALL - 1);

  for (SyscallIterator iter(false); !iter.Done();) {
    uint32_t sysnum = iter.Next();
    ErrorCode err = policy_->EvaluateSyscall(this, static_cast<int>(sysnum));
    if (!iter.IsValid(sysnum) && !invalid_err.Equals(err)) {
      // A proper sandbox policy should always treat system calls outside of
      // the range MIN_SYSCALL..MAX_SYSCALL (i.e. anything that returns
      // "false" for SyscallIterator::IsValid()) identically. Typically, all
      // of these system calls would be denied with the same ErrorCode.
      SANDBOX_DIE("Invalid seccomp policy");
    }
    if (!err.Equals(old_err) || iter.Done()) {
      ranges->push_back(Range(old_sysnum, sysnum - 1, old_err));
      old_sysnum = sysnum;
      old_err = err;
    }
  }
}

Instruction* SandboxBPF::AssembleJumpTable(CodeGen* gen,
                                           Ranges::const_iterator start,
                                           Ranges::const_iterator stop) {
  // We convert the list of system call ranges into a jump table that performs
  // a binary search over the ranges.
  // As a sanity check, we need to have at least one distinct range for us
  // to be able to build a jump table.
  if (stop - start <= 0) {
    SANDBOX_DIE("Invalid set of system call ranges");
  } else if (stop - start == 1) {
    // If we have narrowed things down to a single range object, we can
    // return from the BPF filter program.
    return RetExpression(gen, start->err);
  }

  // Pick the range object that is located at the mid point of our list.
  // We compare our system call number against the lowest valid system call
  // number in this range object. If our number is lower, it is outside of
  // this range object. If it is greater or equal, it might be inside.
  Ranges::const_iterator mid = start + (stop - start) / 2;

  // Sub-divide the list of ranges and continue recursively.
  Instruction* jf = AssembleJumpTable(gen, start, mid);
  Instruction* jt = AssembleJumpTable(gen, mid, stop);
  return gen->MakeInstruction(BPF_JMP + BPF_JGE + BPF_K, mid->from, jt, jf);
}

Instruction* SandboxBPF::RetExpression(CodeGen* gen, const ErrorCode& err) {
  if (err.error_type_ == ErrorCode::ET_COND) {
    return CondExpression(gen, err);
  } else {
    return gen->MakeInstruction(BPF_RET + BPF_K, err);
  }
}

Instruction* SandboxBPF::CondExpression(CodeGen* gen, const ErrorCode& cond) {
  // We can only inspect the six system call arguments that are passed in
  // CPU registers.
  if (cond.argno_ < 0 || cond.argno_ >= 6) {
    SANDBOX_DIE(
        "Internal compiler error; invalid argument number "
        "encountered");
  }

  // BPF programs operate on 32bit entities. Load both halves of the 64bit
  // system call argument and then generate suitable conditional statements.
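  // For example (sketch), an OP_EQUAL test against a 64bit value V roughly
  // compiles to:
  //   A = arg.msb; if (A != V >> 32)     return failed;
  //   A = arg.lsb; if (A == (uint32_t)V) return passed; else return failed;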
  Instruction* msb_head = gen->MakeInstruction(
      BPF_LD + BPF_W + BPF_ABS, SECCOMP_ARG_MSB_IDX(cond.argno_));
  Instruction* msb_tail = msb_head;
  Instruction* lsb_head = gen->MakeInstruction(
      BPF_LD + BPF_W + BPF_ABS, SECCOMP_ARG_LSB_IDX(cond.argno_));
  Instruction* lsb_tail = lsb_head;

  // Emit a suitable comparison statement.
  switch (cond.op_) {
    case ErrorCode::OP_EQUAL:
      // Compare the least significant bits for equality
      lsb_tail = gen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K,
                                      static_cast<uint32_t>(cond.value_),
                                      RetExpression(gen, *cond.passed_),
                                      RetExpression(gen, *cond.failed_));
      gen->JoinInstructions(lsb_head, lsb_tail);

      // If we are looking at a 64bit argument, we need to also compare the
      // most significant bits.
      if (cond.width_ == ErrorCode::TP_64BIT) {
        msb_tail =
            gen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K,
                                 static_cast<uint32_t>(cond.value_ >> 32),
                                 lsb_head,
                                 RetExpression(gen, *cond.failed_));
        gen->JoinInstructions(msb_head, msb_tail);
      }
      break;
    case ErrorCode::OP_HAS_ALL_BITS:
      // Check the bits in the LSB half of the system call argument. Our
      // OP_HAS_ALL_BITS operator passes, iff all of the bits are set. This is
      // different from the kernel's BPF_JSET operation which passes, if any of
      // the bits are set.
      // Of course, if there is only a single set bit (or none at all), then
      // things get easier.
      {
        uint32_t lsb_bits = static_cast<uint32_t>(cond.value_);
        int lsb_bit_count = popcount(lsb_bits);
        if (lsb_bit_count == 0) {
          // No bits are set in the LSB half. The test will always pass.
          lsb_head = RetExpression(gen, *cond.passed_);
          lsb_tail = NULL;
        } else if (lsb_bit_count == 1) {
          // Exactly one bit is set in the LSB half. We can use the BPF_JSET
          // operator.
          lsb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K,
                                          lsb_bits,
                                          RetExpression(gen, *cond.passed_),
                                          RetExpression(gen, *cond.failed_));
          gen->JoinInstructions(lsb_head, lsb_tail);
        } else {
          // More than one bit is set in the LSB half. We need to combine
          // BPF_AND and BPF_JEQ to test whether all of these bits are in fact
          // set in the system call argument.
          gen->JoinInstructions(
              lsb_head,
              gen->MakeInstruction(BPF_ALU + BPF_AND + BPF_K,
                                   lsb_bits,
                                   lsb_tail = gen->MakeInstruction(
                                       BPF_JMP + BPF_JEQ + BPF_K,
                                       lsb_bits,
                                       RetExpression(gen, *cond.passed_),
                                       RetExpression(gen, *cond.failed_))));
        }
      }

      // If we are looking at a 64bit argument, we need to also check the bits
      // in the MSB half of the system call argument.
      if (cond.width_ == ErrorCode::TP_64BIT) {
        uint32_t msb_bits = static_cast<uint32_t>(cond.value_ >> 32);
        int msb_bit_count = popcount(msb_bits);
        if (msb_bit_count == 0) {
          // No bits are set in the MSB half. The test will always pass.
          msb_head = lsb_head;
        } else if (msb_bit_count == 1) {
          // Exactly one bit is set in the MSB half. We can use the BPF_JSET
          // operator.
          msb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K,
                                          msb_bits,
                                          lsb_head,
                                          RetExpression(gen, *cond.failed_));
          gen->JoinInstructions(msb_head, msb_tail);
        } else {
          // More than one bit is set in the MSB half. We need to combine
          // BPF_AND and BPF_JEQ to test whether all of these bits are in fact
          // set in the system call argument.
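          // E.g., for msb_bits == 0x5 we compute A &= 0x5 and then test
          // A == 0x5, which only succeeds if both bit 0 and bit 2 were set.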
          gen->JoinInstructions(
              msb_head,
              gen->MakeInstruction(
                  BPF_ALU + BPF_AND + BPF_K,
                  msb_bits,
                  gen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K,
                                       msb_bits,
                                       lsb_head,
                                       RetExpression(gen, *cond.failed_))));
        }
      }
      break;
    case ErrorCode::OP_HAS_ANY_BITS:
      // Check the bits in the LSB half of the system call argument. Our
      // OP_HAS_ANY_BITS operator passes, iff any of the bits are set. This
      // maps nicely to the kernel's BPF_JSET operation.
      {
        uint32_t lsb_bits = static_cast<uint32_t>(cond.value_);
        if (!lsb_bits) {
          // No bits are set in the LSB half. The test will always fail.
          lsb_head = RetExpression(gen, *cond.failed_);
          lsb_tail = NULL;
        } else {
          lsb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K,
                                          lsb_bits,
                                          RetExpression(gen, *cond.passed_),
                                          RetExpression(gen, *cond.failed_));
          gen->JoinInstructions(lsb_head, lsb_tail);
        }
      }

      // If we are looking at a 64bit argument, we need to also check the bits
      // in the MSB half of the system call argument.
      if (cond.width_ == ErrorCode::TP_64BIT) {
        uint32_t msb_bits = static_cast<uint32_t>(cond.value_ >> 32);
        if (!msb_bits) {
          // No bits are set in the MSB half. The test will always fail.
          msb_head = lsb_head;
        } else {
          msb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K,
                                          msb_bits,
                                          RetExpression(gen, *cond.passed_),
                                          lsb_head);
          gen->JoinInstructions(msb_head, msb_tail);
        }
      }
      break;
    default:
      // TODO(markus): Need to add support for OP_GREATER
      SANDBOX_DIE("Not implemented");
      break;
  }

  // Ensure that we never pass a 64bit value, when we only expect a 32bit
  // value. This is somewhat complicated by the fact that on 64bit systems,
  // callers could legitimately pass in a non-zero value in the MSB, iff the
  // LSB has been sign-extended into the MSB.
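  // Concretely: the MSB of an argument that the policy declared as 32bit must
  // either be zero, or (on 64bit systems) be 0xFFFFFFFF while the LSB has its
  // sign bit (0x80000000) set; anything else is reported via
  // Unexpected64bitArgument() below.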
  if (cond.width_ == ErrorCode::TP_32BIT) {
    if (cond.value_ >> 32) {
      SANDBOX_DIE(
          "Invalid comparison of a 32bit system call argument "
          "against a 64bit constant; this test is always false.");
    }

    Instruction* invalid_64bit = RetExpression(gen, Unexpected64bitArgument());
#if __SIZEOF_POINTER__ > 4
    invalid_64bit = gen->MakeInstruction(
        BPF_JMP + BPF_JEQ + BPF_K,
        0xFFFFFFFF,
        gen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS,
                             SECCOMP_ARG_LSB_IDX(cond.argno_),
                             gen->MakeInstruction(BPF_JMP + BPF_JGE + BPF_K,
                                                  0x80000000,
                                                  lsb_head,
                                                  invalid_64bit)),
        invalid_64bit);
#endif
    gen->JoinInstructions(
        msb_tail,
        gen->MakeInstruction(
            BPF_JMP + BPF_JEQ + BPF_K, 0, lsb_head, invalid_64bit));
  }

  return msb_head;
}

ErrorCode SandboxBPF::Unexpected64bitArgument() {
  return Kill("Unexpected 64bit argument detected");
}

ErrorCode SandboxBPF::Trap(Trap::TrapFnc fnc, const void* aux) {
  return Trap::MakeTrap(fnc, aux, true /* Safe Trap */);
}

ErrorCode SandboxBPF::UnsafeTrap(Trap::TrapFnc fnc, const void* aux) {
  return Trap::MakeTrap(fnc, aux, false /* Unsafe Trap */);
}

intptr_t SandboxBPF::ForwardSyscall(const struct arch_seccomp_data& args) {
  return SandboxSyscall(args.nr,
                        static_cast<intptr_t>(args.args[0]),
                        static_cast<intptr_t>(args.args[1]),
                        static_cast<intptr_t>(args.args[2]),
                        static_cast<intptr_t>(args.args[3]),
                        static_cast<intptr_t>(args.args[4]),
                        static_cast<intptr_t>(args.args[5]));
}

ErrorCode SandboxBPF::Cond(int argno,
                           ErrorCode::ArgType width,
                           ErrorCode::Operation op,
                           uint64_t value,
                           const ErrorCode& passed,
                           const ErrorCode& failed) {
  return ErrorCode(argno,
                   width,
                   op,
                   value,
                   &*conds_->insert(passed).first,
                   &*conds_->insert(failed).first);
}

ErrorCode SandboxBPF::Kill(const char* msg) {
  return Trap(BPFFailure, const_cast<char*>(msg));
}

SandboxBPF::SandboxStatus SandboxBPF::status_ = STATUS_UNKNOWN;

}  // namespace sandbox
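
// Usage sketch (not part of this file; "MyPolicy" is a hypothetical
// SandboxBPFPolicy subclass implementing EvaluateSyscall()):
//
//   sandbox::SandboxBPF sandbox;
//   sandbox.set_proc_fd(proc_fd);              // optional
//   sandbox.SetSandboxPolicy(new MyPolicy());  // SandboxBPF takes ownership
//   sandbox.StartSandbox();                    // must still be single-threaded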