1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be 3 // found in the LICENSE file. 4 5 #include "sandbox/linux/bpf_dsl/bpf_dsl.h" 6 7 #include <errno.h> 8 #include <fcntl.h> 9 #include <pthread.h> 10 #include <sched.h> 11 #include <signal.h> 12 #include <sys/prctl.h> 13 #include <sys/ptrace.h> 14 #include <sys/syscall.h> 15 #include <sys/time.h> 16 #include <sys/types.h> 17 #include <sys/utsname.h> 18 #include <unistd.h> 19 #include <sys/socket.h> 20 21 #if defined(ANDROID) 22 // Work-around for buggy headers in Android's NDK 23 #define __user 24 #endif 25 #include <linux/futex.h> 26 27 #include "base/bind.h" 28 #include "base/logging.h" 29 #include "base/macros.h" 30 #include "base/memory/scoped_ptr.h" 31 #include "base/posix/eintr_wrapper.h" 32 #include "base/synchronization/waitable_event.h" 33 #include "base/threading/thread.h" 34 #include "build/build_config.h" 35 #include "sandbox/linux/seccomp-bpf/bpf_tests.h" 36 #include "sandbox/linux/seccomp-bpf/die.h" 37 #include "sandbox/linux/seccomp-bpf/linux_seccomp.h" 38 #include "sandbox/linux/seccomp-bpf/sandbox_bpf.h" 39 #include "sandbox/linux/seccomp-bpf/syscall.h" 40 #include "sandbox/linux/seccomp-bpf/trap.h" 41 #include "sandbox/linux/services/broker_process.h" 42 #include "sandbox/linux/services/linux_syscalls.h" 43 #include "sandbox/linux/tests/scoped_temporary_file.h" 44 #include "sandbox/linux/tests/unit_tests.h" 45 #include "testing/gtest/include/gtest/gtest.h" 46 47 // Workaround for Android's prctl.h file. 
#ifndef PR_GET_ENDIAN
#define PR_GET_ENDIAN 19
#endif
#ifndef PR_CAPBSET_READ
#define PR_CAPBSET_READ 23
#define PR_CAPBSET_DROP 24
#endif

namespace sandbox {
namespace bpf_dsl {

namespace {

const int kExpectedReturnValue = 42;
const char kSandboxDebuggingEnv[] = "CHROME_SANDBOX_DEBUGGING";

// Set the global environment to allow the use of UnsafeTrap() policies.
void EnableUnsafeTraps() {
  // The use of UnsafeTrap() causes us to print a warning message. This is
  // generally desirable, but it results in the unittest failing, as it doesn't
  // expect any messages on "stderr". So, temporarily disable messages. The
  // BPF_TEST() is guaranteed to turn messages back on, after the policy
  // function has completed.
  setenv(kSandboxDebuggingEnv, "t", 0);
  Die::SuppressInfoMessages(true);
}

// This test should execute no matter whether we have kernel support. So,
// we make it a TEST() instead of a BPF_TEST().
TEST(SandboxBPF, DISABLE_ON_TSAN(CallSupports)) {
  // We check that we don't crash, but it's ok if the kernel doesn't
  // support it.
  bool seccomp_bpf_supported =
      SandboxBPF::SupportsSeccompSandbox(-1) == SandboxBPF::STATUS_AVAILABLE;
  // We want to log whether or not seccomp BPF is actually supported
  // since actual test coverage depends on it.
  RecordProperty("SeccompBPFSupported",
                 seccomp_bpf_supported ? "true." : "false.");
  std::cout << "Seccomp BPF supported: "
            << (seccomp_bpf_supported ? "true." : "false.") << "\n";
  RecordProperty("PointerSize", sizeof(void*));
  std::cout << "Pointer size: " << sizeof(void*) << "\n";
}

SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(CallSupportsTwice)) {
  // Querying support twice must be idempotent and must not crash.
  SandboxBPF::SupportsSeccompSandbox(-1);
  SandboxBPF::SupportsSeccompSandbox(-1);
}

// BPF_TEST does a lot of the boiler-plate code around setting up a
// policy and optionally passing data between the caller, the policy and
// any Trap() handlers. This is great for writing short and concise tests,
// and it helps us avoid accidentally forgetting any of the crucial steps
// in setting up the sandbox. But it wouldn't hurt to have at least one
// test that explicitly walks through all these steps.

// Trap handler: increments the int pointed to by |aux| and returns the
// pre-increment value as the syscall result.
intptr_t IncreaseCounter(const struct arch_seccomp_data& args, void* aux) {
  BPF_ASSERT(aux);
  int* counter = static_cast<int*>(aux);
  return (*counter)++;
}

// Policy that routes uname() through IncreaseCounter() and allows
// everything else. Used by the "verbose" (non-macro) API walkthrough below.
class VerboseAPITestingPolicy : public SandboxBPFDSLPolicy {
 public:
  explicit VerboseAPITestingPolicy(int* counter_ptr)
      : counter_ptr_(counter_ptr) {}
  virtual ~VerboseAPITestingPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    if (sysno == __NR_uname) {
      return Trap(IncreaseCounter, counter_ptr_);
    }
    return Allow();
  }

 private:
  int* counter_ptr_;  // Not owned; must outlive the sandbox.

  DISALLOW_COPY_AND_ASSIGN(VerboseAPITestingPolicy);
};

SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(VerboseAPITesting)) {
  if (SandboxBPF::SupportsSeccompSandbox(-1) ==
      sandbox::SandboxBPF::STATUS_AVAILABLE) {
    static int counter = 0;

    SandboxBPF sandbox;
    sandbox.SetSandboxPolicy(new VerboseAPITestingPolicy(&counter));
    BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));

    // Each uname() returns the previous counter value, then increments it.
    BPF_ASSERT_EQ(0, counter);
    BPF_ASSERT_EQ(0, syscall(__NR_uname, 0));
    BPF_ASSERT_EQ(1, counter);
    BPF_ASSERT_EQ(1, syscall(__NR_uname, 0));
    BPF_ASSERT_EQ(2, counter);
  }
}

// A simple blacklist test

class BlacklistNanosleepPolicy : public SandboxBPFDSLPolicy {
 public:
  BlacklistNanosleepPolicy() {}
  virtual ~BlacklistNanosleepPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    switch (sysno) {
      case __NR_nanosleep:
        return Error(EACCES);
      default:
        return Allow();
    }
  }

  static void AssertNanosleepFails() {
    const struct timespec ts = {0, 0};
    errno = 0;
    BPF_ASSERT_EQ(-1, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL)));
    BPF_ASSERT_EQ(EACCES, errno);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(BlacklistNanosleepPolicy);
};

BPF_TEST_C(SandboxBPF, ApplyBasicBlacklistPolicy, BlacklistNanosleepPolicy) {
  BlacklistNanosleepPolicy::AssertNanosleepFails();
}

// Now do a simple whitelist test

class WhitelistGetpidPolicy : public SandboxBPFDSLPolicy {
 public:
  WhitelistGetpidPolicy() {}
  virtual ~WhitelistGetpidPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    switch (sysno) {
      case __NR_getpid:
      case __NR_exit_group:
        return Allow();
      default:
        return Error(ENOMEM);
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(WhitelistGetpidPolicy);
};

BPF_TEST_C(SandboxBPF, ApplyBasicWhitelistPolicy, WhitelistGetpidPolicy) {
  // getpid() should be allowed
  errno = 0;
  BPF_ASSERT(syscall(__NR_getpid) > 0);
  BPF_ASSERT(errno == 0);

  // getpgid() should be denied
  BPF_ASSERT(getpgid(0) == -1);
  BPF_ASSERT(errno == ENOMEM);
}

// A simple blacklist policy, with a SIGSYS handler
// The negative return value is surfaced to the sandboxed caller as errno
// (see the BasicBlacklistWithSigsys test below).
intptr_t EnomemHandler(const struct arch_seccomp_data& args, void* aux) {
  // We also check that the auxiliary data is correct
  SANDBOX_ASSERT(aux);
  *(static_cast<int*>(aux)) = kExpectedReturnValue;
  return -ENOMEM;
}

class BlacklistNanosleepTrapPolicy : public SandboxBPFDSLPolicy {
 public:
  explicit BlacklistNanosleepTrapPolicy(int* aux) : aux_(aux) {}
  virtual ~BlacklistNanosleepTrapPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    switch (sysno) {
      case __NR_nanosleep:
        return Trap(EnomemHandler, aux_);
      default:
        return Allow();
    }
  }

 private:
  int* aux_;  // Not owned; handed to the trap handler on each nanosleep().

  DISALLOW_COPY_AND_ASSIGN(BlacklistNanosleepTrapPolicy);
};

BPF_TEST(SandboxBPF,
         BasicBlacklistWithSigsys,
         BlacklistNanosleepTrapPolicy,
         int /* (*BPF_AUX) */) {
  // getpid() should work properly
  errno = 0;
  BPF_ASSERT(syscall(__NR_getpid) > 0);
  BPF_ASSERT(errno == 0);

  // Our Auxiliary Data, should be reset by the signal handler
  *BPF_AUX = -1;
  const struct timespec ts = {0, 0};
  BPF_ASSERT(syscall(__NR_nanosleep, &ts, NULL) == -1);
  BPF_ASSERT(errno == ENOMEM);

  // We expect the signal handler to modify AuxData
  BPF_ASSERT(*BPF_AUX == kExpectedReturnValue);
}

// A simple test that verifies we can return arbitrary errno values.

class ErrnoTestPolicy : public SandboxBPFDSLPolicy {
 public:
  ErrnoTestPolicy() {}
  virtual ~ErrnoTestPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE;

 private:
  DISALLOW_COPY_AND_ASSIGN(ErrnoTestPolicy);
};

ResultExpr ErrnoTestPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  switch (sysno) {
    case __NR_dup3:  // dup2 is a wrapper of dup3 in android
#if defined(__NR_dup2)
    case __NR_dup2:
#endif
      // Pretend that dup2() worked, but don't actually do anything.
      return Error(0);
    case __NR_setuid:
#if defined(__NR_setuid32)
    case __NR_setuid32:
#endif
      // Return errno = 1 (the minimum non-zero errno).
      return Error(1);
    case __NR_setgid:
#if defined(__NR_setgid32)
    case __NR_setgid32:
#endif
      // Return maximum errno value (typically 4095).
      return Error(ErrorCode::ERR_MAX_ERRNO);
    case __NR_uname:
      // Return errno = 42.
      return Error(42);
    default:
      return Allow();
  }
}

BPF_TEST_C(SandboxBPF, ErrnoTest, ErrnoTestPolicy) {
  // Verify that dup2() returns success, but doesn't actually run.
  int fds[4];
  BPF_ASSERT(pipe(fds) == 0);
  BPF_ASSERT(pipe(fds + 2) == 0);
  BPF_ASSERT(dup2(fds[2], fds[0]) == 0);
  char buf[1] = {};
  BPF_ASSERT(write(fds[1], "\x55", 1) == 1);
  BPF_ASSERT(write(fds[3], "\xAA", 1) == 1);
  BPF_ASSERT(read(fds[0], buf, 1) == 1);

  // If dup2() executed, we will read \xAA, but if dup2() has been turned
  // into a no-op by our policy, then we will read \x55.
  BPF_ASSERT(buf[0] == '\x55');

  // Verify that we can return the minimum and maximum errno values.
  errno = 0;
  BPF_ASSERT(setuid(0) == -1);
  BPF_ASSERT(errno == 1);

  // On Android, errno is only supported up to 255, otherwise errno
  // processing is skipped.
  // We work around this (crbug.com/181647).
  if (sandbox::IsAndroid() && setgid(0) != -1) {
    errno = 0;
    BPF_ASSERT(setgid(0) == -ErrorCode::ERR_MAX_ERRNO);
    BPF_ASSERT(errno == 0);
  } else {
    errno = 0;
    BPF_ASSERT(setgid(0) == -1);
    BPF_ASSERT(errno == ErrorCode::ERR_MAX_ERRNO);
  }

  // Finally, test an errno in between the minimum and maximum.
  errno = 0;
  struct utsname uts_buf;
  BPF_ASSERT(uname(&uts_buf) == -1);
  BPF_ASSERT(errno == 42);
}

// Testing the stacking of two sandboxes

class StackingPolicyPartOne : public SandboxBPFDSLPolicy {
 public:
  StackingPolicyPartOne() {}
  virtual ~StackingPolicyPartOne() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    switch (sysno) {
      case __NR_getppid: {
        // Allow getppid(arg0 == 0); reject everything else with EPERM.
        const Arg<int> arg(0);
        return If(arg == 0, Allow()).Else(Error(EPERM));
      }
      default:
        return Allow();
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(StackingPolicyPartOne);
};

class StackingPolicyPartTwo : public SandboxBPFDSLPolicy {
 public:
  StackingPolicyPartTwo() {}
  virtual ~StackingPolicyPartTwo() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    switch (sysno) {
      case __NR_getppid: {
        // Inverse of part one: reject getppid(arg0 == 0) with EINVAL.
        const Arg<int> arg(0);
        return If(arg == 0, Error(EINVAL)).Else(Allow());
      }
      default:
        return Allow();
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(StackingPolicyPartTwo);
};

BPF_TEST_C(SandboxBPF, StackingPolicy, StackingPolicyPartOne) {
  errno = 0;
  BPF_ASSERT(syscall(__NR_getppid, 0) > 0);
  BPF_ASSERT(errno == 0);

  BPF_ASSERT(syscall(__NR_getppid, 1) == -1);
  BPF_ASSERT(errno == EPERM);

  // Stack a second sandbox with its own policy. Verify that we can further
  // restrict filters, but we cannot relax existing filters.
  SandboxBPF sandbox;
  sandbox.SetSandboxPolicy(new StackingPolicyPartTwo());
  BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));

  errno = 0;
  BPF_ASSERT(syscall(__NR_getppid, 0) == -1);
  BPF_ASSERT(errno == EINVAL);

  BPF_ASSERT(syscall(__NR_getppid, 1) == -1);
  BPF_ASSERT(errno == EPERM);
}

// A more complex, but synthetic policy. This tests the correctness of the BPF
// program by iterating through all syscalls and checking for an errno that
// depends on the syscall number. Unlike the Verifier, this exercises the BPF
// interpreter in the kernel.

// We try to make sure we exercise optimizations in the BPF compiler. We make
// sure that the compiler can have an opportunity to coalesce syscalls with
// contiguous numbers and we also make sure that disjoint sets can return the
// same errno.
int SysnoToRandomErrno(int sysno) {
  // Small contiguous sets of 3 system calls return an errno equal to the
  // index of that set + 1 (so that we never return a NUL errno).
  return ((sysno & ~3) >> 2) % 29 + 1;
}

class SyntheticPolicy : public SandboxBPFDSLPolicy {
 public:
  SyntheticPolicy() {}
  virtual ~SyntheticPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    if (sysno == __NR_exit_group || sysno == __NR_write) {
      // exit_group() is special, we really need it to work.
      // write() is needed for BPF_ASSERT() to report a useful error message.
      return Allow();
    }
    return Error(SysnoToRandomErrno(sysno));
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(SyntheticPolicy);
};

BPF_TEST_C(SandboxBPF, SyntheticPolicy, SyntheticPolicy) {
  // Ensure that kExpectedReturnValue + syscallnumber + 1 does not int
  // overflow.
  BPF_ASSERT(std::numeric_limits<int>::max() - kExpectedReturnValue - 1 >=
             static_cast<int>(MAX_PUBLIC_SYSCALL));

  for (int syscall_number = static_cast<int>(MIN_SYSCALL);
       syscall_number <= static_cast<int>(MAX_PUBLIC_SYSCALL);
       ++syscall_number) {
    if (syscall_number == __NR_exit_group || syscall_number == __NR_write) {
      // exit_group() is special
      continue;
    }
    errno = 0;
    BPF_ASSERT(syscall(syscall_number) == -1);
    BPF_ASSERT(errno == SysnoToRandomErrno(syscall_number));
  }
}

#if defined(__arm__)
// A simple policy that tests whether ARM private system calls are supported
// by our BPF compiler and by the BPF interpreter in the kernel.

// For ARM private system calls, return an errno equal to their offset from
// MIN_PRIVATE_SYSCALL plus 1 (to avoid NUL errno).
int ArmPrivateSysnoToErrno(int sysno) {
  if (sysno >= static_cast<int>(MIN_PRIVATE_SYSCALL) &&
      sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) {
    return (sysno - MIN_PRIVATE_SYSCALL) + 1;
  } else {
    return ENOSYS;
  }
}

class ArmPrivatePolicy : public SandboxBPFDSLPolicy {
 public:
  ArmPrivatePolicy() {}
  virtual ~ArmPrivatePolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    // Start from |__ARM_NR_set_tls + 1| so as not to mess with actual
    // ARM private system calls.
    if (sysno >= static_cast<int>(__ARM_NR_set_tls + 1) &&
        sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) {
      return Error(ArmPrivateSysnoToErrno(sysno));
    }
    return Allow();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(ArmPrivatePolicy);
};

BPF_TEST_C(SandboxBPF, ArmPrivatePolicy, ArmPrivatePolicy) {
  for (int syscall_number = static_cast<int>(__ARM_NR_set_tls + 1);
       syscall_number <= static_cast<int>(MAX_PRIVATE_SYSCALL);
       ++syscall_number) {
    errno = 0;
    BPF_ASSERT(syscall(syscall_number) == -1);
    BPF_ASSERT(errno == ArmPrivateSysnoToErrno(syscall_number));
  }
}
#endif  // defined(__arm__)

intptr_t CountSyscalls(const struct arch_seccomp_data& args, void* aux) {
  // Count all invocations of our callback function.
  ++*reinterpret_cast<int*>(aux);

  // Verify that within the callback function all filtering is temporarily
  // disabled.
  BPF_ASSERT(syscall(__NR_getpid) > 1);

  // Verify that we can now call the underlying system call without causing
  // infinite recursion.
  return SandboxBPF::ForwardSyscall(args);
}

class GreyListedPolicy : public SandboxBPFDSLPolicy {
 public:
  explicit GreyListedPolicy(int* aux) : aux_(aux) {
    // Set the global environment for unsafe traps once.
    EnableUnsafeTraps();
  }
  virtual ~GreyListedPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    // Some system calls must always be allowed, if our policy wants to make
    // use of UnsafeTrap()
    if (SandboxBPF::IsRequiredForUnsafeTrap(sysno)) {
      return Allow();
    } else if (sysno == __NR_getpid) {
      // Disallow getpid()
      return Error(EPERM);
    } else {
      // Allow (and count) all other system calls.
      return UnsafeTrap(CountSyscalls, aux_);
    }
  }

 private:
  int* aux_;  // Not owned; incremented by CountSyscalls().

  DISALLOW_COPY_AND_ASSIGN(GreyListedPolicy);
};

BPF_TEST(SandboxBPF, GreyListedPolicy, GreyListedPolicy, int /* (*BPF_AUX) */) {
  BPF_ASSERT(syscall(__NR_getpid) == -1);
  BPF_ASSERT(errno == EPERM);
  BPF_ASSERT(*BPF_AUX == 0);
  BPF_ASSERT(syscall(__NR_geteuid) == syscall(__NR_getuid));
  BPF_ASSERT(*BPF_AUX == 2);
  char name[17] = {};
  BPF_ASSERT(!syscall(__NR_prctl,
                      PR_GET_NAME,
                      name,
                      (void*)NULL,
                      (void*)NULL,
                      (void*)NULL));
  BPF_ASSERT(*BPF_AUX == 3);
  BPF_ASSERT(*name);
}

SANDBOX_TEST(SandboxBPF, EnableUnsafeTrapsInSigSysHandler) {
  // Disabling warning messages that could confuse our test framework.
  setenv(kSandboxDebuggingEnv, "t", 0);
  Die::SuppressInfoMessages(true);

  // Unsafe traps must only be enabled when the debugging environment
  // variable is set to a non-empty value.
  unsetenv(kSandboxDebuggingEnv);
  SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == false);
  setenv(kSandboxDebuggingEnv, "", 1);
  SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == false);
  setenv(kSandboxDebuggingEnv, "t", 1);
  SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == true);
}

intptr_t PrctlHandler(const struct arch_seccomp_data& args, void*) {
  if (args.args[0] == PR_CAPBSET_DROP && static_cast<int>(args.args[1]) == -1) {
    // prctl(PR_CAPBSET_DROP, -1) is never valid. The kernel will always
    // return an error. But our handler allows this call.
    return 0;
  } else {
    return SandboxBPF::ForwardSyscall(args);
  }
}

class PrctlPolicy : public SandboxBPFDSLPolicy {
 public:
  PrctlPolicy() {}
  virtual ~PrctlPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    setenv(kSandboxDebuggingEnv, "t", 0);
    Die::SuppressInfoMessages(true);

    if (sysno == __NR_prctl) {
      // Handle prctl() inside an UnsafeTrap()
      return UnsafeTrap(PrctlHandler, NULL);
    }

    // Allow all other system calls.
    return Allow();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(PrctlPolicy);
};

BPF_TEST_C(SandboxBPF, ForwardSyscall, PrctlPolicy) {
  // This call should never be allowed. But our policy will intercept it and
  // let it pass successfully.
  BPF_ASSERT(
      !prctl(PR_CAPBSET_DROP, -1, (void*)NULL, (void*)NULL, (void*)NULL));

  // Verify that the call will fail, if it makes it all the way to the kernel.
  BPF_ASSERT(
      prctl(PR_CAPBSET_DROP, -2, (void*)NULL, (void*)NULL, (void*)NULL) == -1);

  // And verify that other uses of prctl() work just fine.
  char name[17] = {};
  BPF_ASSERT(!syscall(__NR_prctl,
                      PR_GET_NAME,
                      name,
                      (void*)NULL,
                      (void*)NULL,
                      (void*)NULL));
  BPF_ASSERT(*name);

  // Finally, verify that system calls other than prctl() are completely
  // unaffected by our policy.
  struct utsname uts = {};
  BPF_ASSERT(!uname(&uts));
  BPF_ASSERT(!strcmp(uts.sysname, "Linux"));
}

intptr_t AllowRedirectedSyscall(const struct arch_seccomp_data& args, void*) {
  return SandboxBPF::ForwardSyscall(args);
}

class RedirectAllSyscallsPolicy : public SandboxBPFDSLPolicy {
 public:
  RedirectAllSyscallsPolicy() {}
  virtual ~RedirectAllSyscallsPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE;

 private:
  DISALLOW_COPY_AND_ASSIGN(RedirectAllSyscallsPolicy);
};

ResultExpr RedirectAllSyscallsPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  setenv(kSandboxDebuggingEnv, "t", 0);
  Die::SuppressInfoMessages(true);

  // Some system calls must always be allowed, if our policy wants to make
  // use of UnsafeTrap()
  if (SandboxBPF::IsRequiredForUnsafeTrap(sysno))
    return Allow();
  return UnsafeTrap(AllowRedirectedSyscall, NULL);
}

// File descriptor written to by SigBusHandler(); set up by the SigBus test
// below before it raises SIGBUS.
int bus_handler_fd_ = -1;

void SigBusHandler(int, siginfo_t* info, void* void_context) {
  BPF_ASSERT(write(bus_handler_fd_, "\x55", 1) == 1);
}

BPF_TEST_C(SandboxBPF, SigBus, RedirectAllSyscallsPolicy) {
  // We use the SIGBUS bit in the signal mask as a thread-local boolean
  // value in the implementation of UnsafeTrap(). This is obviously a bit
  // of a hack that could conceivably interfere with code that uses SIGBUS
  // in more traditional ways. This test verifies that basic functionality
  // of SIGBUS is not impacted, but it is certainly possible to construe
  // more complex uses of signals where our use of the SIGBUS mask is not
  // 100% transparent. This is expected behavior.
  int fds[2];
  BPF_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0);
  bus_handler_fd_ = fds[1];
  struct sigaction sa = {};
  sa.sa_sigaction = SigBusHandler;
  sa.sa_flags = SA_SIGINFO;
  BPF_ASSERT(sigaction(SIGBUS, &sa, NULL) == 0);
  raise(SIGBUS);
  char c = '\000';
  BPF_ASSERT(read(fds[0], &c, 1) == 1);
  BPF_ASSERT(close(fds[0]) == 0);
  BPF_ASSERT(close(fds[1]) == 0);
  BPF_ASSERT(c == 0x55);
}

BPF_TEST_C(SandboxBPF, SigMask, RedirectAllSyscallsPolicy) {
  // Signal masks are potentially tricky to handle. For instance, if we
  // ever tried to update them from inside a Trap() or UnsafeTrap() handler,
  // the call to sigreturn() at the end of the signal handler would undo
  // all of our efforts. So, it makes sense to test that sigprocmask()
  // works, even if we have a policy in place that makes use of UnsafeTrap().
  // In practice, this works because we force sigprocmask() to be handled
  // entirely in the kernel.
  sigset_t mask0, mask1, mask2;

  // Call sigprocmask() to verify that SIGUSR2 wasn't blocked, if we didn't
  // change the mask (it shouldn't have been, as it isn't blocked by default
  // in POSIX).
  //
  // Use SIGUSR2 because Android seems to use SIGUSR1 for some purpose.
  sigemptyset(&mask0);
  BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, &mask1));
  BPF_ASSERT(!sigismember(&mask1, SIGUSR2));

  // Try again, and this time we verify that we can block it. This
  // requires a second call to sigprocmask().
  sigaddset(&mask0, SIGUSR2);
  BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, NULL));
  BPF_ASSERT(!sigprocmask(SIG_BLOCK, NULL, &mask2));
  BPF_ASSERT(sigismember(&mask2, SIGUSR2));
}

BPF_TEST_C(SandboxBPF, UnsafeTrapWithErrno, RedirectAllSyscallsPolicy) {
  // An UnsafeTrap() (or for that matter, a Trap()) has to report error
  // conditions by returning an exit code in the range -1..-4096. This
  // should happen automatically if using ForwardSyscall(). If the TrapFnc()
  // uses some other method to make system calls, then it is responsible
  // for computing the correct return code.
  // This test verifies that ForwardSyscall() does the correct thing.

  // The glibc system wrapper will ultimately set errno for us. So, from normal
  // userspace, all of this should be completely transparent.
  errno = 0;
  BPF_ASSERT(close(-1) == -1);
  BPF_ASSERT(errno == EBADF);

  // Explicitly avoid the glibc wrapper. This is not normally the way anybody
  // would make system calls, but it allows us to verify that we don't
  // accidentally mess with errno, when we shouldn't.
  errno = 0;
  struct arch_seccomp_data args = {};
  args.nr = __NR_close;
  args.args[0] = -1;
  BPF_ASSERT(SandboxBPF::ForwardSyscall(args) == -EBADF);
  BPF_ASSERT(errno == 0);
}

// Trivial broker-initialization callback; always reports success.
bool NoOpCallback() {
  return true;
}

// Test a trap handler that makes use of a broker process to open().
752 753 class InitializedOpenBroker { 754 public: 755 InitializedOpenBroker() : initialized_(false) { 756 std::vector<std::string> allowed_files; 757 allowed_files.push_back("/proc/allowed"); 758 allowed_files.push_back("/proc/cpuinfo"); 759 760 broker_process_.reset( 761 new BrokerProcess(EPERM, allowed_files, std::vector<std::string>())); 762 BPF_ASSERT(broker_process() != NULL); 763 BPF_ASSERT(broker_process_->Init(base::Bind(&NoOpCallback))); 764 765 initialized_ = true; 766 } 767 bool initialized() { return initialized_; } 768 class BrokerProcess* broker_process() { return broker_process_.get(); } 769 770 private: 771 bool initialized_; 772 scoped_ptr<class BrokerProcess> broker_process_; 773 DISALLOW_COPY_AND_ASSIGN(InitializedOpenBroker); 774 }; 775 776 intptr_t BrokerOpenTrapHandler(const struct arch_seccomp_data& args, 777 void* aux) { 778 BPF_ASSERT(aux); 779 BrokerProcess* broker_process = static_cast<BrokerProcess*>(aux); 780 switch (args.nr) { 781 case __NR_faccessat: // access is a wrapper of faccessat in android 782 BPF_ASSERT(static_cast<int>(args.args[0]) == AT_FDCWD); 783 return broker_process->Access(reinterpret_cast<const char*>(args.args[1]), 784 static_cast<int>(args.args[2])); 785 #if defined(__NR_access) 786 case __NR_access: 787 return broker_process->Access(reinterpret_cast<const char*>(args.args[0]), 788 static_cast<int>(args.args[1])); 789 #endif 790 #if defined(__NR_open) 791 case __NR_open: 792 return broker_process->Open(reinterpret_cast<const char*>(args.args[0]), 793 static_cast<int>(args.args[1])); 794 #endif 795 case __NR_openat: 796 // We only call open() so if we arrive here, it's because glibc uses 797 // the openat() system call. 
798 BPF_ASSERT(static_cast<int>(args.args[0]) == AT_FDCWD); 799 return broker_process->Open(reinterpret_cast<const char*>(args.args[1]), 800 static_cast<int>(args.args[2])); 801 default: 802 BPF_ASSERT(false); 803 return -ENOSYS; 804 } 805 } 806 807 class DenyOpenPolicy : public SandboxBPFDSLPolicy { 808 public: 809 explicit DenyOpenPolicy(InitializedOpenBroker* iob) : iob_(iob) {} 810 virtual ~DenyOpenPolicy() {} 811 812 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE { 813 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); 814 815 switch (sysno) { 816 case __NR_faccessat: 817 #if defined(__NR_access) 818 case __NR_access: 819 #endif 820 #if defined(__NR_open) 821 case __NR_open: 822 #endif 823 case __NR_openat: 824 // We get a InitializedOpenBroker class, but our trap handler wants 825 // the BrokerProcess object. 826 return Trap(BrokerOpenTrapHandler, iob_->broker_process()); 827 default: 828 return Allow(); 829 } 830 } 831 832 private: 833 InitializedOpenBroker* iob_; 834 835 DISALLOW_COPY_AND_ASSIGN(DenyOpenPolicy); 836 }; 837 838 // We use a InitializedOpenBroker class, so that we can run unsandboxed 839 // code in its constructor, which is the only way to do so in a BPF_TEST. 840 BPF_TEST(SandboxBPF, 841 UseOpenBroker, 842 DenyOpenPolicy, 843 InitializedOpenBroker /* (*BPF_AUX) */) { 844 BPF_ASSERT(BPF_AUX->initialized()); 845 BrokerProcess* broker_process = BPF_AUX->broker_process(); 846 BPF_ASSERT(broker_process != NULL); 847 848 // First, use the broker "manually" 849 BPF_ASSERT(broker_process->Open("/proc/denied", O_RDONLY) == -EPERM); 850 BPF_ASSERT(broker_process->Access("/proc/denied", R_OK) == -EPERM); 851 BPF_ASSERT(broker_process->Open("/proc/allowed", O_RDONLY) == -ENOENT); 852 BPF_ASSERT(broker_process->Access("/proc/allowed", R_OK) == -ENOENT); 853 854 // Now use glibc's open() as an external library would. 
855 BPF_ASSERT(open("/proc/denied", O_RDONLY) == -1); 856 BPF_ASSERT(errno == EPERM); 857 858 BPF_ASSERT(open("/proc/allowed", O_RDONLY) == -1); 859 BPF_ASSERT(errno == ENOENT); 860 861 // Also test glibc's openat(), some versions of libc use it transparently 862 // instead of open(). 863 BPF_ASSERT(openat(AT_FDCWD, "/proc/denied", O_RDONLY) == -1); 864 BPF_ASSERT(errno == EPERM); 865 866 BPF_ASSERT(openat(AT_FDCWD, "/proc/allowed", O_RDONLY) == -1); 867 BPF_ASSERT(errno == ENOENT); 868 869 // And test glibc's access(). 870 BPF_ASSERT(access("/proc/denied", R_OK) == -1); 871 BPF_ASSERT(errno == EPERM); 872 873 BPF_ASSERT(access("/proc/allowed", R_OK) == -1); 874 BPF_ASSERT(errno == ENOENT); 875 876 // This is also white listed and does exist. 877 int cpu_info_access = access("/proc/cpuinfo", R_OK); 878 BPF_ASSERT(cpu_info_access == 0); 879 int cpu_info_fd = open("/proc/cpuinfo", O_RDONLY); 880 BPF_ASSERT(cpu_info_fd >= 0); 881 char buf[1024]; 882 BPF_ASSERT(read(cpu_info_fd, buf, sizeof(buf)) > 0); 883 } 884 885 // Simple test demonstrating how to use SandboxBPF::Cond() 886 887 class SimpleCondTestPolicy : public SandboxBPFDSLPolicy { 888 public: 889 SimpleCondTestPolicy() {} 890 virtual ~SimpleCondTestPolicy() {} 891 892 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE; 893 894 private: 895 DISALLOW_COPY_AND_ASSIGN(SimpleCondTestPolicy); 896 }; 897 898 ResultExpr SimpleCondTestPolicy::EvaluateSyscall(int sysno) const { 899 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); 900 901 // We deliberately return unusual errno values upon failure, so that we 902 // can uniquely test for these values. In a "real" policy, you would want 903 // to return more traditional values. 904 int flags_argument_position = -1; 905 switch (sysno) { 906 #if defined(__NR_open) 907 case __NR_open: 908 flags_argument_position = 1; 909 #endif 910 case __NR_openat: { // open can be a wrapper for openat(2). 
911 if (sysno == __NR_openat) 912 flags_argument_position = 2; 913 914 // Allow opening files for reading, but don't allow writing. 915 COMPILE_ASSERT(O_RDONLY == 0, O_RDONLY_must_be_all_zero_bits); 916 const Arg<int> flags(flags_argument_position); 917 return If((flags & O_ACCMODE) != 0, Error(EROFS)).Else(Allow()); 918 } 919 case __NR_prctl: { 920 // Allow prctl(PR_SET_DUMPABLE) and prctl(PR_GET_DUMPABLE), but 921 // disallow everything else. 922 const Arg<int> option(0); 923 return If(option == PR_SET_DUMPABLE || option == PR_GET_DUMPABLE, Allow()) 924 .Else(Error(ENOMEM)); 925 } 926 default: 927 return Allow(); 928 } 929 } 930 931 BPF_TEST_C(SandboxBPF, SimpleCondTest, SimpleCondTestPolicy) { 932 int fd; 933 BPF_ASSERT((fd = open("/proc/self/comm", O_RDWR)) == -1); 934 BPF_ASSERT(errno == EROFS); 935 BPF_ASSERT((fd = open("/proc/self/comm", O_RDONLY)) >= 0); 936 close(fd); 937 938 int ret; 939 BPF_ASSERT((ret = prctl(PR_GET_DUMPABLE)) >= 0); 940 BPF_ASSERT(prctl(PR_SET_DUMPABLE, 1 - ret) == 0); 941 BPF_ASSERT(prctl(PR_GET_ENDIAN, &ret) == -1); 942 BPF_ASSERT(errno == ENOMEM); 943 } 944 945 // This test exercises the SandboxBPF::Cond() method by building a complex 946 // tree of conditional equality operations. It then makes system calls and 947 // verifies that they return the values that we expected from our BPF 948 // program. 949 class EqualityStressTest { 950 public: 951 EqualityStressTest() { 952 // We want a deterministic test 953 srand(0); 954 955 // Iterates over system call numbers and builds a random tree of 956 // equality tests. 957 // We are actually constructing a graph of ArgValue objects. This 958 // graph will later be used to a) compute our sandbox policy, and 959 // b) drive the code that verifies the output from the BPF program. 
    COMPILE_ASSERT(
        kNumTestCases < (int)(MAX_PUBLIC_SYSCALL - MIN_SYSCALL - 10),
        num_test_cases_must_be_significantly_smaller_than_num_system_calls);
    for (int sysno = MIN_SYSCALL, end = kNumTestCases; sysno < end; ++sysno) {
      if (IsReservedSyscall(sysno)) {
        // Skip reserved system calls. This ensures that our test frame
        // work isn't impacted by the fact that we are overriding
        // a lot of different system calls.
        ++end;
        arg_values_.push_back(NULL);
      } else {
        arg_values_.push_back(
            RandomArgValue(rand() % kMaxArgs, 0, rand() % kMaxArgs));
      }
    }
  }

  ~EqualityStressTest() {
    for (std::vector<ArgValue*>::iterator iter = arg_values_.begin();
         iter != arg_values_.end();
         ++iter) {
      DeleteArgValue(*iter);
    }
  }

  // Returns the sandbox result for "sysno": Allow() for syscalls outside our
  // test data, otherwise the conditional expression built from the ArgValue
  // tree.
  ResultExpr Policy(int sysno) {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    if (sysno < 0 || sysno >= (int)arg_values_.size() ||
        IsReservedSyscall(sysno)) {
      // We only return ErrorCode values for the system calls that
      // are part of our test data. Every other system call remains
      // allowed.
      return Allow();
    } else {
      // ToErrorCode() turns an ArgValue object into an ErrorCode that is
      // suitable for use by a sandbox policy.
      return ToErrorCode(arg_values_[sysno]);
    }
  }

  void VerifyFilter() {
    // Iterate over all system calls. Skip the system calls that have
    // previously been determined as being reserved.
    for (int sysno = 0; sysno < (int)arg_values_.size(); ++sysno) {
      if (!arg_values_[sysno]) {
        // Skip reserved system calls.
        continue;
      }
      // Verify that system calls return the values that we expect them to
      // return. This involves passing different combinations of system call
      // parameters in order to exercise all possible code paths through the
      // BPF filter program.
      // We arbitrarily start by setting all six system call arguments to
      // zero. And we then recursively traverse our tree of ArgValues to
      // determine the necessary combinations of parameters.
      intptr_t args[6] = {};
      Verify(sysno, args, *arg_values_[sysno]);
    }
  }

 private:
  // One node of the (randomly generated) decision tree: test system call
  // argument "argno" against "size" candidate values; each match either
  // yields an errno (leaf) or descends into a child subtree.
  struct ArgValue {
    int argno;  // Argument number to inspect.
    int size;   // Number of test cases (must be > 0).
    struct Tests {
      uint32_t k_value;            // Value to compare syscall arg against.
      int err;                     // If non-zero, errno value to return.
      struct ArgValue* arg_value;  // Otherwise, more args needs inspecting.
    }* tests;
    int err;                     // If none of the tests passed, this is what
    struct ArgValue* arg_value;  // we'll return (this is the "else" branch).
  };

  bool IsReservedSyscall(int sysno) {
    // There are a handful of system calls that we should never use in our
    // test cases. These system calls are needed to allow the test framework
    // to run properly.
    // If we wanted to write fully generic code, there are more system calls
    // that could be listed here, and it is quite difficult to come up with a
    // truly comprehensive list. After all, we are deliberately making system
    // calls unavailable. In practice, we have a pretty good idea of the system
    // calls that will be made by this particular test. So, this small list is
    // sufficient. But if anybody copy'n'pasted this code for other uses, they
    // would have to review the list.
    return sysno == __NR_read || sysno == __NR_write || sysno == __NR_exit ||
           sysno == __NR_exit_group || sysno == __NR_restart_syscall;
  }

  ArgValue* RandomArgValue(int argno, int args_mask, int remaining_args) {
    // Create a new ArgValue and fill it with random data. We use a bit mask
    // to keep track of the system call parameters that have previously been
    // set; this ensures that we won't accidentally define a contradictory
    // set of equality tests.
    struct ArgValue* arg_value = new ArgValue();
    args_mask |= 1 << argno;
    arg_value->argno = argno;

    // Apply some restrictions on just how complex our tests can be.
    // Otherwise, we end up with a BPF program that is too complicated for
    // the kernel to load.
    int fan_out = kMaxFanOut;
    if (remaining_args > 3) {
      fan_out = 1;
    } else if (remaining_args > 2) {
      fan_out = 2;
    }

    // Create a couple of different test cases with randomized values that
    // we want to use when comparing system call parameter number "argno".
    arg_value->size = rand() % fan_out + 1;
    arg_value->tests = new ArgValue::Tests[arg_value->size];

    uint32_t k_value = rand();
    for (int n = 0; n < arg_value->size; ++n) {
      // Ensure that we have unique values
      k_value += rand() % (RAND_MAX / (kMaxFanOut + 1)) + 1;

      // There are two possible types of nodes. Either this is a leaf node;
      // in that case, we have completed all the equality tests that we
      // wanted to perform, and we can now compute a random "errno" value that
      // we should return. Or this is part of a more complex boolean
      // expression; in that case, we have to recursively add tests for some
      // of system call parameters that we have not yet included in our
      // tests.
      arg_value->tests[n].k_value = k_value;
      if (!remaining_args || (rand() & 1)) {
        arg_value->tests[n].err = (rand() % 1000) + 1;
        arg_value->tests[n].arg_value = NULL;
      } else {
        arg_value->tests[n].err = 0;
        arg_value->tests[n].arg_value =
            RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
      }
    }
    // Finally, we have to define what we should return if none of the
    // previous equality tests pass. Again, we can either deal with a leaf
    // node, or we can randomly add another couple of tests.
    if (!remaining_args || (rand() & 1)) {
      arg_value->err = (rand() % 1000) + 1;
      arg_value->arg_value = NULL;
    } else {
      arg_value->err = 0;
      arg_value->arg_value =
          RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
    }
    // We have now built a new (sub-)tree of ArgValues defining a set of
    // boolean expressions for testing random system call arguments against
    // random values. Return this tree to our caller.
    return arg_value;
  }

  int RandomArg(int args_mask) {
    // Compute a random system call parameter number.
    int argno = rand() % kMaxArgs;

    // Make sure that this same parameter number has not previously been
    // used. Otherwise, we could end up with a test that is impossible to
    // satisfy (e.g. args[0] == 1 && args[0] == 2).
    while (args_mask & (1 << argno)) {
      argno = (argno + 1) % kMaxArgs;
    }
    return argno;
  }

  void DeleteArgValue(ArgValue* arg_value) {
    // Delete an ArgValue and all of its child nodes. This requires
    // recursively descending into the tree.
    if (arg_value) {
      if (arg_value->size) {
        for (int n = 0; n < arg_value->size; ++n) {
          if (!arg_value->tests[n].err) {
            DeleteArgValue(arg_value->tests[n].arg_value);
          }
        }
        delete[] arg_value->tests;
      }
      if (!arg_value->err) {
        DeleteArgValue(arg_value->arg_value);
      }
      delete arg_value;
    }
  }

  ResultExpr ToErrorCode(ArgValue* arg_value) {
    // Compute the ResultExpr that should be returned, if none of our
    // tests succeed (i.e. the system call parameter doesn't match any
    // of the values in arg_value->tests[].k_value).
    ResultExpr err;
    if (arg_value->err) {
      // If this was a leaf node, return the errno value that we expect to
      // return from the BPF filter program.
      err = Error(arg_value->err);
    } else {
      // If this wasn't a leaf node yet, recursively descend into the rest
      // of the tree. This will end up adding a few more SandboxBPF::Cond()
      // tests to our ErrorCode.
      err = ToErrorCode(arg_value->arg_value);
    }

    // Now, iterate over all the test cases that we want to compare against.
    // This builds a chain of SandboxBPF::Cond() tests
    // (aka "if ... elif ... elif ... elif ... fi")
    for (int n = arg_value->size; n-- > 0;) {
      ResultExpr matched;
      // Again, we distinguish between leaf nodes and subtrees.
      if (arg_value->tests[n].err) {
        matched = Error(arg_value->tests[n].err);
      } else {
        matched = ToErrorCode(arg_value->tests[n].arg_value);
      }
      // For now, all of our tests are limited to 32bit.
      // We have separate tests that check the behavior of 32bit vs. 64bit
      // conditional expressions.
      const Arg<uint32_t> arg(arg_value->argno);
      err = If(arg == arg_value->tests[n].k_value, matched).Else(err);
    }
    return err;
  }

  void Verify(int sysno, intptr_t* args, const ArgValue& arg_value) {
    uint32_t mismatched = 0;
    // Iterate over all the k_values in arg_value.tests[] and verify that
    // we see the expected return values from system calls, when we pass
    // the k_value as a parameter in a system call.
    for (int n = arg_value.size; n-- > 0;) {
      mismatched += arg_value.tests[n].k_value;
      args[arg_value.argno] = arg_value.tests[n].k_value;
      if (arg_value.tests[n].err) {
        VerifyErrno(sysno, args, arg_value.tests[n].err);
      } else {
        Verify(sysno, args, *arg_value.tests[n].arg_value);
      }
    }
    // Find a k_value that doesn't match any of the k_values in
    // arg_value.tests[]. In most cases, the current value of "mismatched"
    // would fit this requirement. But on the off-chance that it happens
    // to collide, we double-check.
  try_again:
    for (int n = arg_value.size; n-- > 0;) {
      if (mismatched == arg_value.tests[n].k_value) {
        ++mismatched;
        goto try_again;
      }
    }
    // Now verify that we see the expected return value from system calls,
    // if we pass a value that doesn't match any of the conditions (i.e. this
    // is testing the "else" clause of the conditions).
    args[arg_value.argno] = mismatched;
    if (arg_value.err) {
      VerifyErrno(sysno, args, arg_value.err);
    } else {
      Verify(sysno, args, *arg_value.arg_value);
    }
    // Reset args[arg_value.argno]. This is not technically needed, but it
    // makes it easier to reason about the correctness of our tests.
    args[arg_value.argno] = 0;
  }

  void VerifyErrno(int sysno, intptr_t* args, int err) {
    // We installed BPF filters that return different errno values
    // based on the system call number and the parameters that we decided
    // to pass in. Verify that this condition holds true.
    BPF_ASSERT(
        Syscall::Call(
            sysno, args[0], args[1], args[2], args[3], args[4], args[5]) ==
        -err);
  }

  // Vector of ArgValue trees. These trees define all the possible boolean
  // expressions that we want to turn into a BPF filter program.
  std::vector<ArgValue*> arg_values_;

  // Don't increase these values. We are pushing the limits of the maximum
  // BPF program that the kernel will allow us to load. If the values are
  // increased too much, the test will start failing.
#if defined(__aarch64__)
  static const int kNumTestCases = 30;
#else
  static const int kNumTestCases = 40;
#endif
  static const int kMaxFanOut = 3;
  static const int kMaxArgs = 6;
};

// Thin adapter that lets the BPF_TEST() framework use an EqualityStressTest
// instance (the per-test aux object) as a sandbox policy.
class EqualityStressTestPolicy : public SandboxBPFDSLPolicy {
 public:
  explicit EqualityStressTestPolicy(EqualityStressTest* aux) : aux_(aux) {}
  virtual ~EqualityStressTestPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
    return aux_->Policy(sysno);
  }

 private:
  EqualityStressTest* aux_;  // Not owned; lifetime managed by BPF_TEST().

  DISALLOW_COPY_AND_ASSIGN(EqualityStressTestPolicy);
};

BPF_TEST(SandboxBPF,
         EqualityTests,
         EqualityStressTestPolicy,
         EqualityStressTest /* (*BPF_AUX) */) {
  BPF_AUX->VerifyFilter();
}

// Policy that compares the same syscall argument both as a 32bit and as a
// 64bit quantity, selected via the first argument to uname().
class EqualityArgumentWidthPolicy : public SandboxBPFDSLPolicy {
 public:
  EqualityArgumentWidthPolicy() {}
  virtual ~EqualityArgumentWidthPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE;

 private:
  DISALLOW_COPY_AND_ASSIGN(EqualityArgumentWidthPolicy);
};

ResultExpr EqualityArgumentWidthPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  if (sysno == __NR_uname) {
    const Arg<int> option(0);
    const Arg<uint32_t> arg32(1);
    const Arg<uint64_t> arg64(1);
    return Switch(option)
        .Case(0, If(arg32 == 0x55555555, Error(1)).Else(Error(2)))
#if __SIZEOF_POINTER__ > 4
        .Case(1, If(arg64 == 0x55555555AAAAAAAAULL, Error(1)).Else(Error(2)))
#endif
        .Default(Error(3));
  }
  return Allow();
}

BPF_TEST_C(SandboxBPF, EqualityArgumentWidth, EqualityArgumentWidthPolicy) {
  BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0x55555555) == -1);
  BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0xAAAAAAAA) == -2);
#if __SIZEOF_POINTER__ > 4
  // On 32bit machines, there is no way to pass a 64bit argument through the
  // syscall interface. So, we have to skip the part of the test that requires
  // 64bit arguments.
  BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x55555555AAAAAAAAULL) == -1);
  BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555500000000ULL) == -2);
  BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555511111111ULL) == -2);
  BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x11111111AAAAAAAAULL) == -2);
#endif
}

#if __SIZEOF_POINTER__ > 4
// On 32bit machines, there is no way to pass a 64bit argument through the
// syscall interface. So, we have to skip the part of the test that requires
// 64bit arguments.
BPF_DEATH_TEST_C(SandboxBPF,
                 EqualityArgumentUnallowed64bit,
                 DEATH_MESSAGE("Unexpected 64bit argument detected"),
                 EqualityArgumentWidthPolicy) {
  // Case 0 only inspects a 32bit Arg; passing a value with non-zero upper
  // bits must trip the sandbox's 64bit-argument detection and kill us.
  Syscall::Call(__NR_uname, 0, 0x5555555555555555ULL);
}
#endif

class EqualityWithNegativeArgumentsPolicy : public SandboxBPFDSLPolicy {
 public:
  EqualityWithNegativeArgumentsPolicy() {}
  virtual ~EqualityWithNegativeArgumentsPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    if (sysno == __NR_uname) {
      // TODO(mdempsky): This currently can't be Arg<int> because then
      // 0xFFFFFFFF will be treated as a (signed) int, and then when
      // Arg::EqualTo casts it to uint64_t, it will be sign extended.
      const Arg<unsigned> arg(0);
      return If(arg == 0xFFFFFFFF, Error(1)).Else(Error(2));
    }
    return Allow();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(EqualityWithNegativeArgumentsPolicy);
};

BPF_TEST_C(SandboxBPF,
           EqualityWithNegativeArguments,
           EqualityWithNegativeArgumentsPolicy) {
  // All three spellings of "all 32 low bits set" must match the 32bit
  // comparison in the policy.
  BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF) == -1);
  BPF_ASSERT(Syscall::Call(__NR_uname, -1) == -1);
  BPF_ASSERT(Syscall::Call(__NR_uname, -1LL) == -1);
}

#if __SIZEOF_POINTER__ > 4
BPF_DEATH_TEST_C(SandboxBPF,
                 EqualityWithNegative64bitArguments,
                 DEATH_MESSAGE("Unexpected 64bit argument detected"),
                 EqualityWithNegativeArgumentsPolicy) {
  // When expecting a 32bit system call argument, we look at the MSB of the
  // 64bit value and allow both "0" and "-1". But the latter is allowed only
  // iff the LSB was negative. So, this death test should error out.
  BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF00000000LL) == -1);
}
#endif

// Policy used to exercise the "has all bits" masked-comparison peephole
// optimization; the first uname() argument selects the bit mask under test.
class AllBitTestPolicy : public SandboxBPFDSLPolicy {
 public:
  AllBitTestPolicy() {}
  virtual ~AllBitTestPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE;

 private:
  static ResultExpr HasAllBits32(uint32_t bits);
  static ResultExpr HasAllBits64(uint64_t bits);

  DISALLOW_COPY_AND_ASSIGN(AllBitTestPolicy);
};

ResultExpr AllBitTestPolicy::HasAllBits32(uint32_t bits) {
  // An empty mask is trivially satisfied by every argument value.
  if (bits == 0) {
    return Error(1);
  }
  const Arg<uint32_t> arg(1);
  return If((arg & bits) == bits, Error(1)).Else(Error(0));
}

ResultExpr AllBitTestPolicy::HasAllBits64(uint64_t bits) {
  // An empty mask is trivially satisfied by every argument value.
  if (bits == 0) {
    return Error(1);
  }
  const Arg<uint64_t> arg(1);
  return If((arg & bits) == bits, Error(1)).Else(Error(0));
}

ResultExpr AllBitTestPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  // Test masked-equality cases that should trigger the "has all bits"
  // peephole optimizations. We try to find bitmasks that could conceivably
  // touch corner cases.
  // For all of these tests, we override the uname(). We can make do with
  // a single system call number, as we use the first system call argument to
  // select the different bit masks that we want to test against.
  if (sysno == __NR_uname) {
    const Arg<int> option(0);
    return Switch(option)
        .Case(0, HasAllBits32(0x0))
        .Case(1, HasAllBits32(0x1))
        .Case(2, HasAllBits32(0x3))
        .Case(3, HasAllBits32(0x80000000))
#if __SIZEOF_POINTER__ > 4
        .Case(4, HasAllBits64(0x0))
        .Case(5, HasAllBits64(0x1))
        .Case(6, HasAllBits64(0x3))
        .Case(7, HasAllBits64(0x80000000))
        .Case(8, HasAllBits64(0x100000000ULL))
        .Case(9, HasAllBits64(0x300000000ULL))
        .Case(10, HasAllBits64(0x100000001ULL))
#endif
        .Default(Kill("Invalid test case number"));
  }
  return Allow();
}

// Define a macro that performs tests using our test policy.
// NOTE: Not all of the arguments in this macro are actually used!
// They are here just to serve as documentation of the conditions
// implemented in the test policy.
// Most notably, "op" and "mask" are unused by the macro. If you want
// to make changes to these values, you will have to edit the
// test policy instead.
#define BITMASK_TEST(testcase, arg, op, mask, expected_value) \
  BPF_ASSERT(Syscall::Call(__NR_uname, (testcase), (arg)) == (expected_value))

// Our uname() system call returns ErrorCode(1) for success and
// ErrorCode(0) for failure. Syscall::Call() turns this into an
// exit code of -1 or 0.
#define EXPECT_FAILURE 0
#define EXPECT_SUCCESS -1

// A couple of our tests behave differently on 32bit and 64bit systems, as
// there is no way for a 32bit system call to pass in a 64bit system call
// argument "arg".
// We expect these tests to succeed on 64bit systems, but to fail on 32bit
// systems.
#define EXPT64_SUCCESS (sizeof(void*) > 4 ? EXPECT_SUCCESS : EXPECT_FAILURE)
BPF_TEST_C(SandboxBPF, AllBitTests, AllBitTestPolicy) {
  // 32bit test: all of 0x0 (should always be true)
  BITMASK_TEST( 0,                   0, ALLBITS32,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 0,                   1, ALLBITS32,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 0,                   3, ALLBITS32,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 0,         0xFFFFFFFFU, ALLBITS32,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 0,                -1LL, ALLBITS32,          0, EXPECT_SUCCESS);

  // 32bit test: all of 0x1
  BITMASK_TEST( 1,                   0, ALLBITS32,        0x1, EXPECT_FAILURE);
  BITMASK_TEST( 1,                   1, ALLBITS32,        0x1, EXPECT_SUCCESS);
  BITMASK_TEST( 1,                   2, ALLBITS32,        0x1, EXPECT_FAILURE);
  BITMASK_TEST( 1,                   3, ALLBITS32,        0x1, EXPECT_SUCCESS);

  // 32bit test: all of 0x3
  BITMASK_TEST( 2,                   0, ALLBITS32,        0x3, EXPECT_FAILURE);
  BITMASK_TEST( 2,                   1, ALLBITS32,        0x3, EXPECT_FAILURE);
  BITMASK_TEST( 2,                   2, ALLBITS32,        0x3, EXPECT_FAILURE);
  BITMASK_TEST( 2,                   3, ALLBITS32,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 2,                   7, ALLBITS32,        0x3, EXPECT_SUCCESS);

  // 32bit test: all of 0x80000000
  BITMASK_TEST( 3,                   0, ALLBITS32, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 3,         0x40000000U, ALLBITS32, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 3,         0x80000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 3,         0xC0000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 3,       -0x80000000LL, ALLBITS32, 0x80000000, EXPECT_SUCCESS);

#if __SIZEOF_POINTER__ > 4
  // 64bit test: all of 0x0 (should always be true)
  BITMASK_TEST( 4,                   0, ALLBITS64,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 4,                   1, ALLBITS64,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 4,                   3, ALLBITS64,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 4,         0xFFFFFFFFU, ALLBITS64,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 4,       0x100000000LL, ALLBITS64,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 4,       0x300000000LL, ALLBITS64,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 4,0x8000000000000000LL, ALLBITS64,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 4,                -1LL, ALLBITS64,          0, EXPECT_SUCCESS);

  // 64bit test: all of 0x1
  BITMASK_TEST( 5,                   0, ALLBITS64,          1, EXPECT_FAILURE);
  BITMASK_TEST( 5,                   1, ALLBITS64,          1, EXPECT_SUCCESS);
  BITMASK_TEST( 5,                   2, ALLBITS64,          1, EXPECT_FAILURE);
  BITMASK_TEST( 5,                   3, ALLBITS64,          1, EXPECT_SUCCESS);
  BITMASK_TEST( 5,       0x100000000LL, ALLBITS64,          1, EXPECT_FAILURE);
  BITMASK_TEST( 5,       0x100000001LL, ALLBITS64,          1, EXPECT_SUCCESS);
  BITMASK_TEST( 5,       0x100000002LL, ALLBITS64,          1, EXPECT_FAILURE);
  BITMASK_TEST( 5,       0x100000003LL, ALLBITS64,          1, EXPECT_SUCCESS);

  // 64bit test: all of 0x3
  BITMASK_TEST( 6,                   0, ALLBITS64,          3, EXPECT_FAILURE);
  BITMASK_TEST( 6,                   1, ALLBITS64,          3, EXPECT_FAILURE);
  BITMASK_TEST( 6,                   2, ALLBITS64,          3, EXPECT_FAILURE);
  BITMASK_TEST( 6,                   3, ALLBITS64,          3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,                   7, ALLBITS64,          3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,       0x100000000LL, ALLBITS64,          3, EXPECT_FAILURE);
  BITMASK_TEST( 6,       0x100000001LL, ALLBITS64,          3, EXPECT_FAILURE);
  BITMASK_TEST( 6,       0x100000002LL, ALLBITS64,          3, EXPECT_FAILURE);
  BITMASK_TEST( 6,       0x100000003LL, ALLBITS64,          3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,       0x100000007LL, ALLBITS64,          3, EXPECT_SUCCESS);

  // 64bit test: all of 0x80000000
  BITMASK_TEST( 7,                   0, ALLBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7,         0x40000000U, ALLBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7,         0x80000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,         0xC0000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,       -0x80000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,       0x100000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7,       0x140000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7,       0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,       0x1C0000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,      -0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);

  // 64bit test: all of 0x100000000
  BITMASK_TEST( 8,       0x000000000LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8,       0x100000000LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8,       0x200000000LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8,       0x300000000LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8,       0x000000001LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8,       0x100000001LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8,       0x200000001LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8,       0x300000001LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);

  // 64bit test: all of 0x300000000
  BITMASK_TEST( 9,       0x000000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9,       0x100000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9,       0x200000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9,       0x300000000LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x700000000LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x000000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9,       0x100000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9,       0x200000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9,       0x300000001LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x700000001LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);

  // 64bit test: all of 0x100000001
  BITMASK_TEST(10,       0x000000000LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
  BITMASK_TEST(10,       0x000000001LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
  BITMASK_TEST(10,       0x100000000LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
  BITMASK_TEST(10,       0x100000001LL, ALLBITS64,0x100000001, EXPT64_SUCCESS);
  BITMASK_TEST(10,         0xFFFFFFFFU, ALLBITS64,0x100000001, EXPECT_FAILURE);
  BITMASK_TEST(10,                 -1L, ALLBITS64,0x100000001, EXPT64_SUCCESS);
#endif
}

// Policy used to exercise the "has any bits" masked-comparison peephole
// optimization; the first uname() argument selects the bit mask under test.
class AnyBitTestPolicy : public SandboxBPFDSLPolicy {
 public:
  AnyBitTestPolicy() {}
  virtual ~AnyBitTestPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE;

 private:
  static ResultExpr HasAnyBits32(uint32_t);
  static ResultExpr HasAnyBits64(uint64_t);

  DISALLOW_COPY_AND_ASSIGN(AnyBitTestPolicy);
};

ResultExpr AnyBitTestPolicy::HasAnyBits32(uint32_t bits) {
  // An empty mask can never intersect any argument value.
  if (bits == 0) {
    return Error(0);
  }
  const Arg<uint32_t> arg(1);
  return If((arg & bits) != 0, Error(1)).Else(Error(0));
}

ResultExpr AnyBitTestPolicy::HasAnyBits64(uint64_t bits) {
  // An empty mask can never intersect any argument value.
  if (bits == 0) {
    return Error(0);
  }
  const Arg<uint64_t> arg(1);
  return If((arg & bits) != 0, Error(1)).Else(Error(0));
}

ResultExpr AnyBitTestPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  // Test masked-equality cases that should trigger the "has any bits"
  // peephole optimizations. We try to find bitmasks that could conceivably
  // touch corner cases.
  // For all of these tests, we override the uname(). We can make do with
  // a single system call number, as we use the first system call argument to
  // select the different bit masks that we want to test against.
  if (sysno == __NR_uname) {
    const Arg<int> option(0);
    return Switch(option)
        .Case(0, HasAnyBits32(0x0))
        .Case(1, HasAnyBits32(0x1))
        .Case(2, HasAnyBits32(0x3))
        .Case(3, HasAnyBits32(0x80000000))
#if __SIZEOF_POINTER__ > 4
        .Case(4, HasAnyBits64(0x0))
        .Case(5, HasAnyBits64(0x1))
        .Case(6, HasAnyBits64(0x3))
        .Case(7, HasAnyBits64(0x80000000))
        .Case(8, HasAnyBits64(0x100000000ULL))
        .Case(9, HasAnyBits64(0x300000000ULL))
        .Case(10, HasAnyBits64(0x100000001ULL))
#endif
        .Default(Kill("Invalid test case number"));
  }
  return Allow();
}

BPF_TEST_C(SandboxBPF, AnyBitTests, AnyBitTestPolicy) {
  // 32bit test: any of 0x0 (should always be false)
  BITMASK_TEST( 0,                   0, ANYBITS32,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 0,                   1, ANYBITS32,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 0,                   3, ANYBITS32,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 0,         0xFFFFFFFFU, ANYBITS32,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 0,                -1LL, ANYBITS32,        0x0, EXPECT_FAILURE);

  // 32bit test: any of 0x1
  BITMASK_TEST( 1,                   0, ANYBITS32,        0x1, EXPECT_FAILURE);
  BITMASK_TEST( 1,                   1, ANYBITS32,        0x1, EXPECT_SUCCESS);
  BITMASK_TEST( 1,                   2, ANYBITS32,        0x1, EXPECT_FAILURE);
  BITMASK_TEST( 1,                   3, ANYBITS32,        0x1, EXPECT_SUCCESS);

  // 32bit test: any of 0x3
  BITMASK_TEST( 2,                   0, ANYBITS32,        0x3, EXPECT_FAILURE);
  BITMASK_TEST( 2,                   1, ANYBITS32,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 2,                   2, ANYBITS32,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 2,                   3, ANYBITS32,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 2,                   7, ANYBITS32,        0x3, EXPECT_SUCCESS);

  // 32bit test: any of 0x80000000
  BITMASK_TEST( 3,                   0, ANYBITS32, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 3,         0x40000000U, ANYBITS32, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 3,         0x80000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 3,         0xC0000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 3,       -0x80000000LL, ANYBITS32, 0x80000000, EXPECT_SUCCESS);

#if __SIZEOF_POINTER__ > 4
  // 64bit test: any of 0x0 (should always be false)
  BITMASK_TEST( 4,                   0, ANYBITS64,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4,                   1, ANYBITS64,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4,                   3, ANYBITS64,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4,         0xFFFFFFFFU, ANYBITS64,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4,       0x100000000LL, ANYBITS64,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4,       0x300000000LL, ANYBITS64,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4,0x8000000000000000LL, ANYBITS64,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4,                -1LL, ANYBITS64,        0x0, EXPECT_FAILURE);

  // 64bit test: any of 0x1
  BITMASK_TEST( 5,                   0, ANYBITS64,        0x1, EXPECT_FAILURE);
  BITMASK_TEST( 5,                   1, ANYBITS64,        0x1, EXPECT_SUCCESS);
  BITMASK_TEST( 5,                   2, ANYBITS64,        0x1, EXPECT_FAILURE);
  BITMASK_TEST( 5,                   3, ANYBITS64,        0x1, EXPECT_SUCCESS);
  BITMASK_TEST( 5,       0x100000001LL, ANYBITS64,        0x1, EXPECT_SUCCESS);
  BITMASK_TEST( 5,       0x100000000LL, ANYBITS64,        0x1, EXPECT_FAILURE);
  BITMASK_TEST( 5,       0x100000002LL, ANYBITS64,        0x1, EXPECT_FAILURE);
  BITMASK_TEST( 5,       0x100000003LL, ANYBITS64,        0x1, EXPECT_SUCCESS);

  // 64bit test: any of 0x3
  BITMASK_TEST( 6,                   0, ANYBITS64,        0x3, EXPECT_FAILURE);
  BITMASK_TEST( 6,                   1, ANYBITS64,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,                   2, ANYBITS64,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,                   3, ANYBITS64,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,                   7, ANYBITS64,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,       0x100000000LL, ANYBITS64,        0x3, EXPECT_FAILURE);
  BITMASK_TEST( 6,       0x100000001LL, ANYBITS64,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,       0x100000002LL, ANYBITS64,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,       0x100000003LL, ANYBITS64,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,       0x100000007LL, ANYBITS64,        0x3, EXPECT_SUCCESS);

  // 64bit test: any of 0x80000000
  BITMASK_TEST( 7,                   0, ANYBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7,         0x40000000U, ANYBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7,         0x80000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,         0xC0000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,       -0x80000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,       0x100000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7,       0x140000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7,       0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,       0x1C0000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,      -0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);

  // 64bit test: any of 0x100000000
  BITMASK_TEST( 8,       0x000000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8,       0x100000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8,       0x200000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8,       0x300000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8,       0x000000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8,       0x100000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8,       0x200000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8,       0x300000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);

  // 64bit test: any of 0x300000000
  BITMASK_TEST( 9,       0x000000000LL, ANYBITS64,0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9,       0x100000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x200000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x300000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x700000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x000000001LL, ANYBITS64,0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9,       0x100000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x200000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x300000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x700000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);

  // 64bit test: any of 0x100000001
  BITMASK_TEST( 10,      0x000000000LL, ANYBITS64,0x100000001, EXPECT_FAILURE);
  BITMASK_TEST( 10,      0x000000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS);
  BITMASK_TEST( 10,      0x100000000LL, ANYBITS64,0x100000001, EXPT64_SUCCESS);
  BITMASK_TEST( 10,      0x100000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS);
  BITMASK_TEST( 10,        0xFFFFFFFFU, ANYBITS64,0x100000001, EXPECT_SUCCESS);
  BITMASK_TEST( 10,                -1L, ANYBITS64,0x100000001, EXPECT_SUCCESS);
#endif
}

// Policy used to exercise general masked-equality comparisons (mask and
// expected value chosen independently); the first uname() argument selects
// the mask/value pair under test.
class MaskedEqualTestPolicy : public SandboxBPFDSLPolicy {
 public:
  MaskedEqualTestPolicy() {}
  virtual ~MaskedEqualTestPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE;

 private:
  static ResultExpr MaskedEqual32(uint32_t mask, uint32_t value);
  static ResultExpr MaskedEqual64(uint64_t mask, uint64_t value);

  DISALLOW_COPY_AND_ASSIGN(MaskedEqualTestPolicy);
};

ResultExpr MaskedEqualTestPolicy::MaskedEqual32(uint32_t mask, uint32_t value) {
  const Arg<uint32_t> arg(1);
  return If((arg & mask) == value, Error(1)).Else(Error(0));
}

ResultExpr MaskedEqualTestPolicy::MaskedEqual64(uint64_t mask, uint64_t value) {
  const Arg<uint64_t> arg(1);
  return If((arg & mask) == value, Error(1)).Else(Error(0));
}

ResultExpr MaskedEqualTestPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));

  if (sysno == __NR_uname) {
    const Arg<int> option(0);
    return Switch(option)
        .Case(0, MaskedEqual32(0x00ff00ff, 0x005500aa))
#if __SIZEOF_POINTER__ > 4
        .Case(1, MaskedEqual64(0x00ff00ff00000000, 0x005500aa00000000))
        .Case(2, MaskedEqual64(0x00ff00ff00ff00ff, 0x005500aa005500aa))
#endif
        .Default(Kill("Invalid test case number"));
  }

  return Allow();
}

#define MASKEQ_TEST(rulenum, arg, expected_result) \
  BPF_ASSERT(Syscall::Call(__NR_uname, (rulenum), (arg)) == (expected_result))

BPF_TEST_C(SandboxBPF, MaskedEqualTests, MaskedEqualTestPolicy) {
  // Allowed: 0x__55__aa
  MASKEQ_TEST(0, 0x00000000, EXPECT_FAILURE);
  MASKEQ_TEST(0, 0x00000001, EXPECT_FAILURE);
  MASKEQ_TEST(0, 0x00000003, EXPECT_FAILURE);
  MASKEQ_TEST(0, 0x00000100, EXPECT_FAILURE);
  MASKEQ_TEST(0, 0x00000300, EXPECT_FAILURE);
  MASKEQ_TEST(0, 0x005500aa, EXPECT_SUCCESS);
  MASKEQ_TEST(0, 0x005500ab, EXPECT_FAILURE);
  MASKEQ_TEST(0, 0x005600aa, EXPECT_FAILURE);
  MASKEQ_TEST(0, 0x005501aa, EXPECT_SUCCESS);
  MASKEQ_TEST(0, 0x005503aa, EXPECT_SUCCESS);
  MASKEQ_TEST(0, 0x555500aa, EXPECT_SUCCESS);
  MASKEQ_TEST(0, 0xaa5500aa, EXPECT_SUCCESS);

#if __SIZEOF_POINTER__ > 4
  // Allowed: 0x__55__aa________
  MASKEQ_TEST(1, 0x0000000000000000, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x0000000000000010, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x0000000000000050, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x0000000100000000, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x0000000300000000, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x0000010000000000, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x0000030000000000, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x005500aa00000000, EXPECT_SUCCESS);
  MASKEQ_TEST(1, 0x005500ab00000000, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x005600aa00000000, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x005501aa00000000, EXPECT_SUCCESS);
  MASKEQ_TEST(1, 0x005503aa00000000, EXPECT_SUCCESS);
  MASKEQ_TEST(1, 0x555500aa00000000, EXPECT_SUCCESS);
  MASKEQ_TEST(1, 0xaa5500aa00000000, EXPECT_SUCCESS);
  // NOTE(review): the next line duplicates the previous assertion verbatim;
  // presumably a different bit pattern was intended — verify against the
  // upstream test matrix.
  MASKEQ_TEST(1, 0xaa5500aa00000000, EXPECT_SUCCESS);
  MASKEQ_TEST(1, 0xaa5500aa0000cafe, EXPECT_SUCCESS);

  // Allowed: 0x__55__aa__55__aa
  MASKEQ_TEST(2, 0x0000000000000000, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x0000000000000010, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x0000000000000050, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x0000000100000000, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x0000000300000000, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x0000010000000000, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x0000030000000000, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x00000000005500aa, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x005500aa00000000, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x005500aa005500aa, EXPECT_SUCCESS);
  MASKEQ_TEST(2, 0x005500aa005700aa, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x005700aa005500aa, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x005500aa004500aa, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x004500aa005500aa, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x005512aa005500aa, EXPECT_SUCCESS);
  MASKEQ_TEST(2, 0x005500aa005534aa, EXPECT_SUCCESS);
  MASKEQ_TEST(2, 0xff5500aa0055ffaa, EXPECT_SUCCESS);
#endif
}

// SIGSYS handler shared by the pthread policies below. It is reached for any
// clone() call that the policy decided not to allow; |aux| carries a
// policy-supplied message describing which rule fired.
intptr_t PthreadTrapHandler(const struct arch_seccomp_data& args, void* aux) {
  if (args.args[0] != (CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD)) {
    // We expect to get called for an attempt to fork(). No need to log that
    // call. But if we ever get called for anything else, we want to verbosely
    // print as much information as possible.
    const char* msg = (const char*)aux;
    printf(
        "Clone() was called with unexpected arguments\n"
        " nr: %d\n"
        " 1: 0x%llX\n"
        " 2: 0x%llX\n"
        " 3: 0x%llX\n"
        " 4: 0x%llX\n"
        " 5: 0x%llX\n"
        " 6: 0x%llX\n"
        "%s\n",
        args.nr,
        (long long)args.args[0],
        (long long)args.args[1],
        (long long)args.args[2],
        (long long)args.args[3],
        (long long)args.args[4],
        (long long)args.args[5],
        msg);
  }
  return -EPERM;
}

// Policy that permits pthread_create()-style clone() calls by comparing the
// flags argument against exact known-good masks (see EvaluateSyscall below).
class PthreadPolicyEquality : public SandboxBPFDSLPolicy {
 public:
  PthreadPolicyEquality() {}
  virtual ~PthreadPolicyEquality() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE;

 private:
  DISALLOW_COPY_AND_ASSIGN(PthreadPolicyEquality);
};

ResultExpr PthreadPolicyEquality::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  // This policy allows creating threads with pthread_create(). But it
  // doesn't allow any other uses of clone(). Most notably, it does not
  // allow callers to implement fork() or vfork() by passing suitable flags
  // to the clone() system call.
  if (sysno == __NR_clone) {
    // We have seen two different valid combinations of flags. Glibc
    // uses the more modern flags, sets the TLS from the call to clone(), and
    // uses futexes to monitor threads. Android's C run-time library, doesn't
    // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED.
    // More recent versions of Android don't set CLONE_DETACHED anymore, so
    // the last case accounts for that.
    // The following policy is very strict. It only allows the exact masks
    // that we have seen in known implementations. It is probably somewhat
    // stricter than what we would want to do.
    const uint64_t kGlibcCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES |
                                     CLONE_SIGHAND | CLONE_THREAD |
                                     CLONE_SYSVSEM | CLONE_SETTLS |
                                     CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
    const uint64_t kBaseAndroidCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES |
                                           CLONE_SIGHAND | CLONE_THREAD |
                                           CLONE_SYSVSEM;
    const Arg<unsigned long> flags(0);
    // Exact match against one of the three known thread-creation masks;
    // anything else (e.g. fork()-style flags) raises SIGSYS via the trap.
    return If(flags == kGlibcCloneMask ||
              flags == (kBaseAndroidCloneMask | CLONE_DETACHED) ||
              flags == kBaseAndroidCloneMask,
              Allow()).Else(Trap(PthreadTrapHandler, "Unknown mask"));
  }

  return Allow();
}

// Policy that permits pthread_create()-style clone() calls by checking
// individual flag bits rather than complete masks (contrast with
// PthreadPolicyEquality above).
class PthreadPolicyBitMask : public SandboxBPFDSLPolicy {
 public:
  PthreadPolicyBitMask() {}
  virtual ~PthreadPolicyBitMask() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE;

 private:
  // True iff at least one of |bits| is set in |arg|.
  static BoolExpr HasAnyBits(const Arg<unsigned long>& arg, unsigned long bits);
  // True iff every bit in |bits| is set in |arg|.
  static BoolExpr HasAllBits(const Arg<unsigned long>& arg, unsigned long bits);

  DISALLOW_COPY_AND_ASSIGN(PthreadPolicyBitMask);
};

BoolExpr PthreadPolicyBitMask::HasAnyBits(const Arg<unsigned long>& arg,
                                          unsigned long bits) {
  return (arg & bits) != 0;
}

BoolExpr PthreadPolicyBitMask::HasAllBits(const Arg<unsigned long>& arg,
                                          unsigned long bits) {
  return (arg & bits) == bits;
}

ResultExpr PthreadPolicyBitMask::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  // This policy allows creating threads with pthread_create(). But it
  // doesn't allow any other uses of clone(). Most notably, it does not
  // allow callers to implement fork() or vfork() by passing suitable flags
  // to the clone() system call.
  if (sysno == __NR_clone) {
    // We have seen two different valid combinations of flags. Glibc
    // uses the more modern flags, sets the TLS from the call to clone(), and
    // uses futexes to monitor threads. Android's C run-time library, doesn't
    // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED.
    // The following policy allows for either combination of flags, but it
    // is generally a little more conservative than strictly necessary. We
    // err on the side of rather safe than sorry.
    // Very noticeably though, we disallow fork() (which is often just a
    // wrapper around clone()).
    const unsigned long kMandatoryFlags = CLONE_VM | CLONE_FS | CLONE_FILES |
                                          CLONE_SIGHAND | CLONE_THREAD |
                                          CLONE_SYSVSEM;
    const unsigned long kFutexFlags =
        CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
    const unsigned long kNoopFlags = CLONE_DETACHED;
    const unsigned long kKnownFlags =
        kMandatoryFlags | kFutexFlags | kNoopFlags;

    const Arg<unsigned long> flags(0);
    return If(HasAnyBits(flags, ~kKnownFlags),
              Trap(PthreadTrapHandler, "Unexpected CLONE_XXX flag found"))
        .ElseIf(!HasAllBits(flags, kMandatoryFlags),
                Trap(PthreadTrapHandler,
                     "Missing mandatory CLONE_XXX flags "
                     "when creating new thread"))
        .ElseIf(
             !HasAllBits(flags, kFutexFlags) && HasAnyBits(flags, kFutexFlags),
             Trap(PthreadTrapHandler,
                  "Must set either all or none of the TLS and futex bits in "
                  "call to clone()"))
        .Else(Allow());
  }

  return Allow();
}

// Thread body used by PthreadTest(): bumps the shared counter and wakes any
// futex waiter so the detached-thread case can be observed to have run.
static void* ThreadFnc(void* arg) {
  ++*reinterpret_cast<int*>(arg);
  Syscall::Call(__NR_futex, arg, FUTEX_WAKE, 1, 0, 0, 0);
  return NULL;
}

// Shared body for the PthreadEquality/PthreadBitMask tests below.
static void PthreadTest() {
  // Attempt to start a joinable thread. This should succeed.
  pthread_t thread;
  int thread_ran = 0;
  BPF_ASSERT(!pthread_create(&thread, NULL, ThreadFnc, &thread_ran));
  BPF_ASSERT(!pthread_join(thread, NULL));
  BPF_ASSERT(thread_ran);

  // Attempt to start a detached thread. This should succeed.
  thread_ran = 0;
  pthread_attr_t attr;
  BPF_ASSERT(!pthread_attr_init(&attr));
  BPF_ASSERT(!pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
  BPF_ASSERT(!pthread_create(&thread, &attr, ThreadFnc, &thread_ran));
  BPF_ASSERT(!pthread_attr_destroy(&attr));
  // We can't join a detached thread; wait on the futex ThreadFnc wakes
  // instead, retrying if the wait is interrupted by a signal.
  while (Syscall::Call(__NR_futex, &thread_ran, FUTEX_WAIT, 0, 0, 0, 0) ==
         -EINTR) {
  }
  BPF_ASSERT(thread_ran);

  // Attempt to fork() a process using clone(). This should fail. We use the
  // same flags that glibc uses when calling fork(). But we don't actually
  // try calling the fork() implementation in the C run-time library, as
  // run-time libraries other than glibc might call __NR_fork instead of
  // __NR_clone, and that would introduce a bogus test failure.
  int pid;
  BPF_ASSERT(Syscall::Call(__NR_clone,
                           CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD,
                           0,
                           0,
                           &pid) == -EPERM);
}

BPF_TEST_C(SandboxBPF, PthreadEquality, PthreadPolicyEquality) {
  PthreadTest();
}

BPF_TEST_C(SandboxBPF, PthreadBitMask, PthreadPolicyBitMask) {
  PthreadTest();
}

// libc might not define these even though the kernel supports it.
#ifndef PTRACE_O_TRACESECCOMP
#define PTRACE_O_TRACESECCOMP 0x00000080
#endif

#ifdef PTRACE_EVENT_SECCOMP
#define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP)
#else
// When Debian/Ubuntu backported seccomp-bpf support into earlier kernels, they
// changed the value of PTRACE_EVENT_SECCOMP from 7 to 8, since 7 was taken by
// PTRACE_EVENT_STOP (upstream chose to renumber PTRACE_EVENT_STOP to 128). If
// PTRACE_EVENT_SECCOMP isn't defined, we have no choice but to consider both
// values here.
#define IS_SECCOMP_EVENT(status) ((status >> 16) == 7 || (status >> 16) == 8)
#endif

#if defined(__arm__)
#ifndef PTRACE_SET_SYSCALL
#define PTRACE_SET_SYSCALL 23
#endif
#endif

#if defined(__aarch64__)
#ifndef PTRACE_GETREGS
#define PTRACE_GETREGS 12
#endif
#endif

#if defined(__aarch64__)
#ifndef PTRACE_SETREGS
#define PTRACE_SETREGS 13
#endif
#endif

// Changes the syscall to run for a child being sandboxed using seccomp-bpf with
// PTRACE_O_TRACESECCOMP. Should only be called when the child is stopped on
// PTRACE_EVENT_SECCOMP.
//
// regs should contain the current set of registers of the child, obtained using
// PTRACE_GETREGS.
//
// Depending on the architecture, this may modify regs, so the caller is
// responsible for committing these changes using PTRACE_SETREGS.
long SetSyscall(pid_t pid, regs_struct* regs, int syscall_number) {
#if defined(__arm__)
  // On ARM, the syscall is changed using PTRACE_SET_SYSCALL. We cannot use the
  // libc ptrace call as the request parameter is an enum, and
  // PTRACE_SET_SYSCALL may not be in the enum.
  return syscall(__NR_ptrace, PTRACE_SET_SYSCALL, pid, NULL, syscall_number);
#endif

  // On other architectures, the syscall number lives in a register that
  // PTRACE_SETREGS can update; the caller commits |regs| afterwards.
  SECCOMP_PT_SYSCALL(*regs) = syscall_number;
  return 0;
}

// Value passed to Trace() by TraceAllPolicy; the tracer reads it back via
// PTRACE_GETEVENTMSG to confirm the event came from our filter.
const uint16_t kTraceData = 0xcc;

// Policy that returns SECCOMP_RET_TRACE for every syscall, handing control to
// the ptrace()-based tracer in the SeccompRetTrace test below.
class TraceAllPolicy : public SandboxBPFDSLPolicy {
 public:
  TraceAllPolicy() {}
  virtual ~TraceAllPolicy() {}

  virtual ResultExpr EvaluateSyscall(int system_call_number) const OVERRIDE {
    return Trace(kTraceData);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(TraceAllPolicy);
};

SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(SeccompRetTrace)) {
  if (SandboxBPF::SupportsSeccompSandbox(-1) !=
      sandbox::SandboxBPF::STATUS_AVAILABLE) {
    return;
  }

// This test is disabled on arm due to a kernel bug.
// See https://code.google.com/p/chromium/issues/detail?id=383977
#if defined(__arm__) || defined(__aarch64__)
  printf("This test is currently disabled on ARM32/64 due to a kernel bug.");
  return;
#endif

#if defined(__mips__)
  // TODO: Figure out how to support specificity of handling indirect syscalls
  // in this test and enable it.
  printf("This test is currently disabled on MIPS.");
  return;
#endif

  // The child installs the trace-everything sandbox; the parent acts as the
  // ptrace() tracer that rewrites selected syscalls.
  pid_t pid = fork();
  BPF_ASSERT_NE(-1, pid);
  if (pid == 0) {
    pid_t my_pid = getpid();
    BPF_ASSERT_NE(-1, ptrace(PTRACE_TRACEME, -1, NULL, NULL));
    // Stop so the parent can attach options before the sandbox starts.
    BPF_ASSERT_EQ(0, raise(SIGSTOP));
    SandboxBPF sandbox;
    sandbox.SetSandboxPolicy(new TraceAllPolicy);
    BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));

    // getpid is allowed.
    BPF_ASSERT_EQ(my_pid, syscall(__NR_getpid));

    // write to stdout is skipped and returns a fake value.
    BPF_ASSERT_EQ(kExpectedReturnValue,
                  syscall(__NR_write, STDOUT_FILENO, "A", 1));

    // kill is rewritten to exit(kExpectedReturnValue).
    syscall(__NR_kill, my_pid, SIGKILL);

    // Should not be reached.
    BPF_ASSERT(false);
  }

  int status;
  BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, WUNTRACED)) != -1);
  BPF_ASSERT(WIFSTOPPED(status));

  BPF_ASSERT_NE(-1,
                ptrace(PTRACE_SETOPTIONS,
                       pid,
                       NULL,
                       reinterpret_cast<void*>(PTRACE_O_TRACESECCOMP)));
  BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
  while (true) {
    BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, 0)) != -1);
    if (WIFEXITED(status) || WIFSIGNALED(status)) {
      BPF_ASSERT(WIFEXITED(status));
      BPF_ASSERT_EQ(kExpectedReturnValue, WEXITSTATUS(status));
      break;
    }

    // Forward any stop that is not a seccomp event (e.g. signals).
    if (!WIFSTOPPED(status) || WSTOPSIG(status) != SIGTRAP ||
        !IS_SECCOMP_EVENT(status)) {
      BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
      continue;
    }

    unsigned long data;
    BPF_ASSERT_NE(-1, ptrace(PTRACE_GETEVENTMSG, pid, NULL, &data));
    BPF_ASSERT_EQ(kTraceData, data);

    regs_struct regs;
    BPF_ASSERT_NE(-1, ptrace(PTRACE_GETREGS, pid, NULL, &regs));
    switch (SECCOMP_PT_SYSCALL(regs)) {
      case __NR_write:
        // Skip writes to stdout, make it return kExpectedReturnValue. Allow
        // writes to stderr so that BPF_ASSERT messages show up.
        if (SECCOMP_PT_PARM1(regs) == STDOUT_FILENO) {
          BPF_ASSERT_NE(-1, SetSyscall(pid, &regs, -1));
          SECCOMP_PT_RESULT(regs) = kExpectedReturnValue;
          BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, &regs));
        }
        break;

      case __NR_kill:
        // Rewrite to exit(kExpectedReturnValue).
        BPF_ASSERT_NE(-1, SetSyscall(pid, &regs, __NR_exit));
        SECCOMP_PT_PARM1(regs) = kExpectedReturnValue;
        BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, &regs));
        break;

      default:
        // Allow all other syscalls.
        break;
    }

    BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
  }
}

// Android does not expose pread64 nor pwrite64.
2167 #if !defined(OS_ANDROID) 2168 2169 bool FullPwrite64(int fd, const char* buffer, size_t count, off64_t offset) { 2170 while (count > 0) { 2171 const ssize_t transfered = 2172 HANDLE_EINTR(pwrite64(fd, buffer, count, offset)); 2173 if (transfered <= 0 || static_cast<size_t>(transfered) > count) { 2174 return false; 2175 } 2176 count -= transfered; 2177 buffer += transfered; 2178 offset += transfered; 2179 } 2180 return true; 2181 } 2182 2183 bool FullPread64(int fd, char* buffer, size_t count, off64_t offset) { 2184 while (count > 0) { 2185 const ssize_t transfered = HANDLE_EINTR(pread64(fd, buffer, count, offset)); 2186 if (transfered <= 0 || static_cast<size_t>(transfered) > count) { 2187 return false; 2188 } 2189 count -= transfered; 2190 buffer += transfered; 2191 offset += transfered; 2192 } 2193 return true; 2194 } 2195 2196 bool pread_64_was_forwarded = false; 2197 2198 class TrapPread64Policy : public SandboxBPFDSLPolicy { 2199 public: 2200 TrapPread64Policy() {} 2201 virtual ~TrapPread64Policy() {} 2202 2203 virtual ResultExpr EvaluateSyscall(int system_call_number) const OVERRIDE { 2204 // Set the global environment for unsafe traps once. 2205 if (system_call_number == MIN_SYSCALL) { 2206 EnableUnsafeTraps(); 2207 } 2208 2209 if (system_call_number == __NR_pread64) { 2210 return UnsafeTrap(ForwardPreadHandler, NULL); 2211 } 2212 return Allow(); 2213 } 2214 2215 private: 2216 static intptr_t ForwardPreadHandler(const struct arch_seccomp_data& args, 2217 void* aux) { 2218 BPF_ASSERT(args.nr == __NR_pread64); 2219 pread_64_was_forwarded = true; 2220 2221 return SandboxBPF::ForwardSyscall(args); 2222 } 2223 2224 DISALLOW_COPY_AND_ASSIGN(TrapPread64Policy); 2225 }; 2226 2227 // pread(2) takes a 64 bits offset. On 32 bits systems, it will be split 2228 // between two arguments. In this test, we make sure that ForwardSyscall() can 2229 // forward it properly. 
BPF_TEST_C(SandboxBPF, Pread64, TrapPread64Policy) {
  ScopedTemporaryFile temp_file;
  // An offset that does not fit in 32 bits, so that forwarding must preserve
  // the full 64 bit value.
  const uint64_t kLargeOffset = (static_cast<uint64_t>(1) << 32) | 0xBEEF;
  const char kTestString[] = "This is a test!";
  BPF_ASSERT(FullPwrite64(
      temp_file.fd(), kTestString, sizeof(kTestString), kLargeOffset));

  char read_test_string[sizeof(kTestString)] = {0};
  BPF_ASSERT(FullPread64(temp_file.fd(),
                         read_test_string,
                         sizeof(read_test_string),
                         kLargeOffset));
  BPF_ASSERT_EQ(0, memcmp(kTestString, read_test_string, sizeof(kTestString)));
  BPF_ASSERT(pread_64_was_forwarded);
}

#endif  // !defined(OS_ANDROID)

// Thread body for the Tsync test: once the main thread signals that the
// filter has been applied process-wide, verifies that this thread is also
// subject to it.
void* TsyncApplyToTwoThreadsFunc(void* cond_ptr) {
  base::WaitableEvent* event = static_cast<base::WaitableEvent*>(cond_ptr);

  // Wait for the main thread to signal that the filter has been applied.
  if (!event->IsSignaled()) {
    event->Wait();
  }

  BPF_ASSERT(event->IsSignaled());

  BlacklistNanosleepPolicy::AssertNanosleepFails();

  return NULL;
}

SANDBOX_TEST(SandboxBPF, Tsync) {
  if (SandboxBPF::SupportsSeccompThreadFilterSynchronization() !=
      SandboxBPF::STATUS_AVAILABLE) {
    return;
  }

  base::WaitableEvent event(true, false);

  // Create a thread on which to invoke the blocked syscall.
  pthread_t thread;
  BPF_ASSERT_EQ(
      0, pthread_create(&thread, NULL, &TsyncApplyToTwoThreadsFunc, &event));

  // Test that nanosleep succeeds before the filter is engaged.
  const struct timespec ts = {0, 0};
  BPF_ASSERT_EQ(0, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL)));

  // Engage the sandbox.
  SandboxBPF sandbox;
  sandbox.SetSandboxPolicy(new BlacklistNanosleepPolicy());
  BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_MULTI_THREADED));

  // This thread should have the filter applied as well.
  BlacklistNanosleepPolicy::AssertNanosleepFails();

  // Signal the condition to invoke the system call.
  event.Signal();

  // Wait for the thread to finish.
  BPF_ASSERT_EQ(0, pthread_join(thread, NULL));
}

// Policy that allows every syscall; used to test sandbox-startup error paths
// where the policy content is irrelevant.
class AllowAllPolicy : public SandboxBPFDSLPolicy {
 public:
  AllowAllPolicy() {}
  virtual ~AllowAllPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
    return Allow();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(AllowAllPolicy);
};

SANDBOX_DEATH_TEST(
    SandboxBPF,
    StartMultiThreadedAsSingleThreaded,
    DEATH_MESSAGE("Cannot start sandbox; process is already multi-threaded")) {
  base::Thread thread("sandbox.linux.StartMultiThreadedAsSingleThreaded");
  BPF_ASSERT(thread.Start());

  SandboxBPF sandbox;
  sandbox.SetSandboxPolicy(new AllowAllPolicy());
  BPF_ASSERT(!sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));
}

// http://crbug.com/407357
#if !defined(THREAD_SANITIZER)
SANDBOX_DEATH_TEST(
    SandboxBPF,
    StartSingleThreadedAsMultiThreaded,
    DEATH_MESSAGE(
        "Cannot start sandbox; process may be single-threaded when "
        "reported as not")) {
  SandboxBPF sandbox;
  sandbox.SetSandboxPolicy(new AllowAllPolicy());
  BPF_ASSERT(!sandbox.StartSandbox(SandboxBPF::PROCESS_MULTI_THREADED));
}
#endif  // !defined(THREAD_SANITIZER)

// A stub handler for the UnsafeTrap. Never called.
intptr_t NoOpHandler(const struct arch_seccomp_data& args, void*) {
  return -1;
}

// Policy mixing conditional rules (on uname/setgid arguments) with an
// UnsafeTrap (on getppid), to verify that both can coexist in one program.
class UnsafeTrapWithCondPolicy : public SandboxBPFDSLPolicy {
 public:
  UnsafeTrapWithCondPolicy() {}
  virtual ~UnsafeTrapWithCondPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    // UnsafeTrap() requires debugging support to be enabled; see
    // EnableUnsafeTraps() for why messages are suppressed.
    setenv(kSandboxDebuggingEnv, "t", 0);
    Die::SuppressInfoMessages(true);

    if (SandboxBPF::IsRequiredForUnsafeTrap(sysno))
      return Allow();

    switch (sysno) {
      case __NR_uname: {
        // Only a zero argument is allowed through; everything else fails.
        const Arg<uint32_t> arg(0);
        return If(arg == 0, Allow()).Else(Error(EPERM));
      }
      case __NR_setgid: {
        // Distinct errno values per argument, so the test below can tell
        // exactly which case matched.
        const Arg<uint32_t> arg(0);
        return Switch(arg)
            .Case(100, Error(ENOMEM))
            .Case(200, Error(ENOSYS))
            .Default(Error(EPERM));
      }
      case __NR_close:
      case __NR_exit_group:
      case __NR_write:
        return Allow();
      case __NR_getppid:
        return UnsafeTrap(NoOpHandler, NULL);
      default:
        return Error(EPERM);
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(UnsafeTrapWithCondPolicy);
};

BPF_TEST_C(SandboxBPF, UnsafeTrapWithCond, UnsafeTrapWithCondPolicy) {
  // uname(0) is allowed by the policy, so the real syscall runs and fails
  // with EFAULT because 0 is not a valid utsname pointer.
  BPF_ASSERT_EQ(-1, syscall(__NR_uname, 0));
  BPF_ASSERT_EQ(EFAULT, errno);

  BPF_ASSERT_EQ(-1, syscall(__NR_uname, 1));
  BPF_ASSERT_EQ(EPERM, errno);

  BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 100));
  BPF_ASSERT_EQ(ENOMEM, errno);

  BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 200));
  BPF_ASSERT_EQ(ENOSYS, errno);

  BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 300));
  BPF_ASSERT_EQ(EPERM, errno);
}

}  // namespace

}  // namespace bpf_dsl
}  // namespace sandbox