1 /* Copyright (c) 2005-2011, Google Inc. 2 * All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are 6 * met: 7 * 8 * * Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * * Redistributions in binary form must reproduce the above 11 * copyright notice, this list of conditions and the following disclaimer 12 * in the documentation and/or other materials provided with the 13 * distribution. 14 * * Neither the name of Google Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived from 16 * this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 * 30 * --- 31 * Author: Markus Gutschke 32 */ 33 34 /* This file includes Linux-specific support functions common to the 35 * coredumper and the thread lister; primarily, this is a collection 36 * of direct system calls, and a couple of symbols missing from 37 * standard header files. 38 * There are a few options that the including file can set to control 39 * the behavior of this file: 40 * 41 * SYS_CPLUSPLUS: 42 * The entire header file will normally be wrapped in 'extern "C" { }", 43 * making it suitable for compilation as both C and C++ source. If you 44 * do not want to do this, you can set the SYS_CPLUSPLUS macro to inhibit 45 * the wrapping. N.B. doing so will suppress inclusion of all prerequisite 46 * system header files, too. It is the caller's responsibility to provide 47 * the necessary definitions. 48 * 49 * SYS_ERRNO: 50 * All system calls will update "errno" unless overriden by setting the 51 * SYS_ERRNO macro prior to including this file. SYS_ERRNO should be 52 * an l-value. 53 * 54 * SYS_INLINE: 55 * New symbols will be defined "static inline", unless overridden by 56 * the SYS_INLINE macro. 57 * 58 * SYS_LINUX_SYSCALL_SUPPORT_H 59 * This macro is used to avoid multiple inclusions of this header file. 60 * If you need to include this file more than once, make sure to 61 * unset SYS_LINUX_SYSCALL_SUPPORT_H before each inclusion. 62 * 63 * SYS_PREFIX: 64 * New system calls will have a prefix of "sys_" unless overridden by 65 * the SYS_PREFIX macro. Valid values for this macro are [0..9] which 66 * results in prefixes "sys[0..9]_". It is also possible to set this 67 * macro to -1, which avoids all prefixes. 68 * 69 * SYS_SYSCALL_ENTRYPOINT: 70 * Some applications (such as sandboxes that filter system calls), need 71 * to be able to run custom-code each time a system call is made. If this 72 * macro is defined, it expands to the name of a "common" symbol. 
If 73 * this symbol is assigned a non-NULL pointer value, it is used as the 74 * address of the system call entrypoint. 75 * A pointer to this symbol can be obtained by calling 76 * get_syscall_entrypoint() 77 * 78 * This file defines a few internal symbols that all start with "LSS_". 79 * Do not access these symbols from outside this file. They are not part 80 * of the supported API. 81 */ 82 #ifndef SYS_LINUX_SYSCALL_SUPPORT_H 83 #define SYS_LINUX_SYSCALL_SUPPORT_H 84 85 /* We currently only support x86-32, x86-64, ARM, MIPS, and PPC on Linux. 86 * Porting to other related platforms should not be difficult. 87 */ 88 #if (defined(__i386__) || defined(__x86_64__) || defined(__ARM_ARCH_3__) || \ 89 defined(__mips__) || defined(__PPC__) || defined(__ARM_EABI__) || \ 90 defined(__aarch64__)) \ 91 && (defined(__linux) || defined(__ANDROID__)) 92 93 #ifndef SYS_CPLUSPLUS 94 #ifdef __cplusplus 95 /* Some system header files in older versions of gcc neglect to properly 96 * handle being included from C++. As it appears to be harmless to have 97 * multiple nested 'extern "C"' blocks, just add another one here. 98 */ 99 extern "C" { 100 #endif 101 102 #include <errno.h> 103 #include <fcntl.h> 104 #include <sched.h> 105 #include <signal.h> 106 #include <stdarg.h> 107 #include <stddef.h> 108 #include <stdint.h> 109 #include <string.h> 110 #include <sys/ptrace.h> 111 #include <sys/resource.h> 112 #include <sys/time.h> 113 #include <sys/types.h> 114 #include <sys/syscall.h> 115 #include <unistd.h> 116 #include <linux/unistd.h> 117 #include <endian.h> 118 119 #ifdef __mips__ 120 /* Include definitions of the ABI currently in use. */ 121 #include <sgidefs.h> 122 #endif 123 #endif 124 125 /* The Android NDK's <sys/stat.h> #defines these macros as aliases 126 * to their non-64 counterparts. To avoid naming conflict, remove them. */ 127 #ifdef __ANDROID__ 128 /* These are restored by the corresponding #pragma pop_macro near 129 * the end of this file. */ 130 # pragma push_macro("stat64") 131 # pragma push_macro("fstat64") 132 # pragma push_macro("lstat64") 133 # undef stat64 134 # undef fstat64 135 # undef lstat64 136 #endif 137 138 /* As glibc often provides subtly incompatible data structures (and implicit 139 * wrapper functions that convert them), we provide our own kernel data 140 * structures for use by the system calls. 141 * These structures have been developed by using Linux 2.6.23 headers for 142 * reference. Note though, we do not care about exact API compatibility 143 * with the kernel, and in fact the kernel often does not have a single 144 * API that works across architectures. Instead, we try to mimic the glibc 145 * API where reasonable, and only guarantee ABI compatibility with the 146 * kernel headers. 147 * Most notably, here are a few changes that were made to the structures 148 * defined by kernel headers: 149 * 150 * - we only define structures, but not symbolic names for kernel data 151 * types. For the latter, we directly use the native C datatype 152 * (i.e. "unsigned" instead of "mode_t"). 153 * - in a few cases, it is possible to define identical structures for 154 * both 32bit (e.g. i386) and 64bit (e.g. x86-64) platforms by 155 * standardizing on the 64bit version of the data types. In particular, 156 * this means that we use "unsigned" where the 32bit headers say 157 * "unsigned long". 158 * - overall, we try to minimize the number of cases where we need to 159 * conditionally define different structures. 
160 * - the "struct kernel_sigaction" class of structures have been 161 * modified to more closely mimic glibc's API by introducing an 162 * anonymous union for the function pointer. 163 * - a small number of field names had to have an underscore appended to 164 * them, because glibc defines a global macro by the same name. 165 */ 166 167 /* include/linux/dirent.h */ 168 struct kernel_dirent64 { 169 unsigned long long d_ino; 170 long long d_off; 171 unsigned short d_reclen; 172 unsigned char d_type; 173 char d_name[256]; 174 }; 175 176 /* include/linux/dirent.h */ 177 #if defined(__aarch64__) 178 // aarch64 only defines dirent64, just uses that for dirent too. 179 #define kernel_dirent kernel_dirent64 180 #else 181 struct kernel_dirent { 182 long d_ino; 183 long d_off; 184 unsigned short d_reclen; 185 char d_name[256]; 186 }; 187 #endif 188 189 /* include/linux/uio.h */ 190 struct kernel_iovec { 191 void *iov_base; 192 unsigned long iov_len; 193 }; 194 195 /* include/linux/socket.h */ 196 struct kernel_msghdr { 197 void *msg_name; 198 int msg_namelen; 199 struct kernel_iovec*msg_iov; 200 unsigned long msg_iovlen; 201 void *msg_control; 202 unsigned long msg_controllen; 203 unsigned msg_flags; 204 }; 205 206 /* include/asm-generic/poll.h */ 207 struct kernel_pollfd { 208 int fd; 209 short events; 210 short revents; 211 }; 212 213 /* include/linux/resource.h */ 214 struct kernel_rlimit { 215 unsigned long rlim_cur; 216 unsigned long rlim_max; 217 }; 218 219 /* include/linux/time.h */ 220 struct kernel_timespec { 221 long tv_sec; 222 long tv_nsec; 223 }; 224 225 /* include/linux/time.h */ 226 struct kernel_timeval { 227 long tv_sec; 228 long tv_usec; 229 }; 230 231 /* include/linux/resource.h */ 232 struct kernel_rusage { 233 struct kernel_timeval ru_utime; 234 struct kernel_timeval ru_stime; 235 long ru_maxrss; 236 long ru_ixrss; 237 long ru_idrss; 238 long ru_isrss; 239 long ru_minflt; 240 long ru_majflt; 241 long ru_nswap; 242 long ru_inblock; 243 long ru_oublock; 244 long ru_msgsnd; 245 long ru_msgrcv; 246 long ru_nsignals; 247 long ru_nvcsw; 248 long ru_nivcsw; 249 }; 250 251 #if defined(__i386__) || defined(__ARM_EABI__) || defined(__ARM_ARCH_3__) \ 252 || defined(__PPC__) 253 254 /* include/asm-{arm,i386,mips,ppc}/signal.h */ 255 struct kernel_old_sigaction { 256 union { 257 void (*sa_handler_)(int); 258 void (*sa_sigaction_)(int, siginfo_t *, void *); 259 }; 260 unsigned long sa_mask; 261 unsigned long sa_flags; 262 void (*sa_restorer)(void); 263 } __attribute__((packed,aligned(4))); 264 #elif (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) 265 #define kernel_old_sigaction kernel_sigaction 266 #elif defined(__aarch64__) 267 // No kernel_old_sigaction defined for arm64. 268 #endif 269 270 /* Some kernel functions (e.g. sigaction() in 2.6.23) require that the 271 * exactly match the size of the signal set, even though the API was 272 * intended to be extensible. We define our own KERNEL_NSIG to deal with 273 * this. 274 * Please note that glibc provides signals [1.._NSIG-1], whereas the 275 * kernel (and this header) provides the range [1..KERNEL_NSIG]. The 276 * actual number of signals is obviously the same, but the constants 277 * differ by one. 
278 */ 279 #ifdef __mips__ 280 #define KERNEL_NSIG 128 281 #else 282 #define KERNEL_NSIG 64 283 #endif 284 285 /* include/asm-{arm,aarch64,i386,mips,x86_64}/signal.h */ 286 struct kernel_sigset_t { 287 unsigned long sig[(KERNEL_NSIG + 8*sizeof(unsigned long) - 1)/ 288 (8*sizeof(unsigned long))]; 289 }; 290 291 /* include/asm-{arm,i386,mips,x86_64,ppc}/signal.h */ 292 struct kernel_sigaction { 293 #ifdef __mips__ 294 unsigned long sa_flags; 295 union { 296 void (*sa_handler_)(int); 297 void (*sa_sigaction_)(int, siginfo_t *, void *); 298 }; 299 struct kernel_sigset_t sa_mask; 300 #else 301 union { 302 void (*sa_handler_)(int); 303 void (*sa_sigaction_)(int, siginfo_t *, void *); 304 }; 305 unsigned long sa_flags; 306 void (*sa_restorer)(void); 307 struct kernel_sigset_t sa_mask; 308 #endif 309 }; 310 311 /* include/linux/socket.h */ 312 struct kernel_sockaddr { 313 unsigned short sa_family; 314 char sa_data[14]; 315 }; 316 317 /* include/asm-{arm,aarch64,i386,mips,ppc}/stat.h */ 318 #ifdef __mips__ 319 #if _MIPS_SIM == _MIPS_SIM_ABI64 320 struct kernel_stat { 321 #else 322 struct kernel_stat64 { 323 #endif 324 unsigned st_dev; 325 unsigned __pad0[3]; 326 unsigned long long st_ino; 327 unsigned st_mode; 328 unsigned st_nlink; 329 unsigned st_uid; 330 unsigned st_gid; 331 unsigned st_rdev; 332 unsigned __pad1[3]; 333 long long st_size; 334 unsigned st_atime_; 335 unsigned st_atime_nsec_; 336 unsigned st_mtime_; 337 unsigned st_mtime_nsec_; 338 unsigned st_ctime_; 339 unsigned st_ctime_nsec_; 340 unsigned st_blksize; 341 unsigned __pad2; 342 unsigned long long st_blocks; 343 }; 344 #elif defined __PPC__ 345 struct kernel_stat64 { 346 unsigned long long st_dev; 347 unsigned long long st_ino; 348 unsigned st_mode; 349 unsigned st_nlink; 350 unsigned st_uid; 351 unsigned st_gid; 352 unsigned long long st_rdev; 353 unsigned short int __pad2; 354 long long st_size; 355 long st_blksize; 356 long long st_blocks; 357 long st_atime_; 358 unsigned long st_atime_nsec_; 359 long st_mtime_; 360 unsigned long st_mtime_nsec_; 361 long st_ctime_; 362 unsigned long st_ctime_nsec_; 363 unsigned long __unused4; 364 unsigned long __unused5; 365 }; 366 #else 367 struct kernel_stat64 { 368 unsigned long long st_dev; 369 unsigned char __pad0[4]; 370 unsigned __st_ino; 371 unsigned st_mode; 372 unsigned st_nlink; 373 unsigned st_uid; 374 unsigned st_gid; 375 unsigned long long st_rdev; 376 unsigned char __pad3[4]; 377 long long st_size; 378 unsigned st_blksize; 379 unsigned long long st_blocks; 380 unsigned st_atime_; 381 unsigned st_atime_nsec_; 382 unsigned st_mtime_; 383 unsigned st_mtime_nsec_; 384 unsigned st_ctime_; 385 unsigned st_ctime_nsec_; 386 unsigned long long st_ino; 387 }; 388 #endif 389 390 /* include/asm-{arm,aarch64,i386,mips,x86_64,ppc}/stat.h */ 391 #if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) 392 struct kernel_stat { 393 /* The kernel headers suggest that st_dev and st_rdev should be 32bit 394 * quantities encoding 12bit major and 20bit minor numbers in an interleaved 395 * format. In reality, we do not see useful data in the top bits. So, 396 * we'll leave the padding in here, until we find a better solution. 
397 */ 398 unsigned short st_dev; 399 short pad1; 400 unsigned st_ino; 401 unsigned short st_mode; 402 unsigned short st_nlink; 403 unsigned short st_uid; 404 unsigned short st_gid; 405 unsigned short st_rdev; 406 short pad2; 407 unsigned st_size; 408 unsigned st_blksize; 409 unsigned st_blocks; 410 unsigned st_atime_; 411 unsigned st_atime_nsec_; 412 unsigned st_mtime_; 413 unsigned st_mtime_nsec_; 414 unsigned st_ctime_; 415 unsigned st_ctime_nsec_; 416 unsigned __unused4; 417 unsigned __unused5; 418 }; 419 #elif defined(__x86_64__) 420 struct kernel_stat { 421 uint64_t st_dev; 422 uint64_t st_ino; 423 uint64_t st_nlink; 424 unsigned st_mode; 425 unsigned st_uid; 426 unsigned st_gid; 427 unsigned __pad0; 428 uint64_t st_rdev; 429 int64_t st_size; 430 int64_t st_blksize; 431 int64_t st_blocks; 432 uint64_t st_atime_; 433 uint64_t st_atime_nsec_; 434 uint64_t st_mtime_; 435 uint64_t st_mtime_nsec_; 436 uint64_t st_ctime_; 437 uint64_t st_ctime_nsec_; 438 int64_t __unused4[3]; 439 }; 440 #elif defined(__PPC__) 441 struct kernel_stat { 442 unsigned st_dev; 443 unsigned long st_ino; // ino_t 444 unsigned long st_mode; // mode_t 445 unsigned short st_nlink; // nlink_t 446 unsigned st_uid; // uid_t 447 unsigned st_gid; // gid_t 448 unsigned st_rdev; 449 long st_size; // off_t 450 unsigned long st_blksize; 451 unsigned long st_blocks; 452 unsigned long st_atime_; 453 unsigned long st_atime_nsec_; 454 unsigned long st_mtime_; 455 unsigned long st_mtime_nsec_; 456 unsigned long st_ctime_; 457 unsigned long st_ctime_nsec_; 458 unsigned long __unused4; 459 unsigned long __unused5; 460 }; 461 #elif (defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI64) 462 struct kernel_stat { 463 unsigned st_dev; 464 int st_pad1[3]; 465 unsigned st_ino; 466 unsigned st_mode; 467 unsigned st_nlink; 468 unsigned st_uid; 469 unsigned st_gid; 470 unsigned st_rdev; 471 int st_pad2[2]; 472 long st_size; 473 int st_pad3; 474 long st_atime_; 475 long st_atime_nsec_; 476 long st_mtime_; 477 long st_mtime_nsec_; 478 long st_ctime_; 479 long st_ctime_nsec_; 480 int st_blksize; 481 int st_blocks; 482 int st_pad4[14]; 483 }; 484 #elif defined(__aarch64__) 485 struct kernel_stat { 486 unsigned long st_dev; 487 unsigned long st_ino; 488 unsigned int st_mode; 489 unsigned int st_nlink; 490 unsigned int st_uid; 491 unsigned int st_gid; 492 unsigned long st_rdev; 493 unsigned long __pad1; 494 long st_size; 495 int st_blksize; 496 int __pad2; 497 long st_blocks; 498 long st_atime_; 499 unsigned long st_atime_nsec_; 500 long st_mtime_; 501 unsigned long st_mtime_nsec_; 502 long st_ctime_; 503 unsigned long st_ctime_nsec_; 504 unsigned int __unused4; 505 unsigned int __unused5; 506 }; 507 #endif 508 509 /* include/asm-{arm,aarch64,i386,mips,x86_64,ppc}/statfs.h */ 510 #ifdef __mips__ 511 #if _MIPS_SIM != _MIPS_SIM_ABI64 512 struct kernel_statfs64 { 513 unsigned long f_type; 514 unsigned long f_bsize; 515 unsigned long f_frsize; 516 unsigned long __pad; 517 unsigned long long f_blocks; 518 unsigned long long f_bfree; 519 unsigned long long f_files; 520 unsigned long long f_ffree; 521 unsigned long long f_bavail; 522 struct { int val[2]; } f_fsid; 523 unsigned long f_namelen; 524 unsigned long f_spare[6]; 525 }; 526 #endif 527 #elif !defined(__x86_64__) 528 struct kernel_statfs64 { 529 unsigned long f_type; 530 unsigned long f_bsize; 531 unsigned long long f_blocks; 532 unsigned long long f_bfree; 533 unsigned long long f_bavail; 534 unsigned long long f_files; 535 unsigned long long f_ffree; 536 struct { int val[2]; } f_fsid; 537 unsigned 
long f_namelen; 538 unsigned long f_frsize; 539 unsigned long f_spare[5]; 540 }; 541 #endif 542 543 /* include/asm-{arm,i386,mips,x86_64,ppc,generic}/statfs.h */ 544 #ifdef __mips__ 545 struct kernel_statfs { 546 long f_type; 547 long f_bsize; 548 long f_frsize; 549 long f_blocks; 550 long f_bfree; 551 long f_files; 552 long f_ffree; 553 long f_bavail; 554 struct { int val[2]; } f_fsid; 555 long f_namelen; 556 long f_spare[6]; 557 }; 558 #elif defined(__x86_64__) 559 struct kernel_statfs { 560 /* x86_64 actually defines all these fields as signed, whereas all other */ 561 /* platforms define them as unsigned. Leaving them at unsigned should not */ 562 /* cause any problems. Make sure these are 64-bit even on x32. */ 563 uint64_t f_type; 564 uint64_t f_bsize; 565 uint64_t f_blocks; 566 uint64_t f_bfree; 567 uint64_t f_bavail; 568 uint64_t f_files; 569 uint64_t f_ffree; 570 struct { int val[2]; } f_fsid; 571 uint64_t f_namelen; 572 uint64_t f_frsize; 573 uint64_t f_spare[5]; 574 }; 575 #else 576 struct kernel_statfs { 577 unsigned long f_type; 578 unsigned long f_bsize; 579 unsigned long f_blocks; 580 unsigned long f_bfree; 581 unsigned long f_bavail; 582 unsigned long f_files; 583 unsigned long f_ffree; 584 struct { int val[2]; } f_fsid; 585 unsigned long f_namelen; 586 unsigned long f_frsize; 587 unsigned long f_spare[5]; 588 }; 589 #endif 590 591 592 /* Definitions missing from the standard header files */ 593 #ifndef O_DIRECTORY 594 #if defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || defined(__aarch64__) 595 #define O_DIRECTORY 0040000 596 #else 597 #define O_DIRECTORY 0200000 598 #endif 599 #endif 600 #ifndef NT_PRXFPREG 601 #define NT_PRXFPREG 0x46e62b7f 602 #endif 603 #ifndef PTRACE_GETFPXREGS 604 #define PTRACE_GETFPXREGS ((enum __ptrace_request)18) 605 #endif 606 #ifndef PR_GET_DUMPABLE 607 #define PR_GET_DUMPABLE 3 608 #endif 609 #ifndef PR_SET_DUMPABLE 610 #define PR_SET_DUMPABLE 4 611 #endif 612 #ifndef PR_GET_SECCOMP 613 #define PR_GET_SECCOMP 21 614 #endif 615 #ifndef PR_SET_SECCOMP 616 #define PR_SET_SECCOMP 22 617 #endif 618 #ifndef AT_FDCWD 619 #define AT_FDCWD (-100) 620 #endif 621 #ifndef AT_SYMLINK_NOFOLLOW 622 #define AT_SYMLINK_NOFOLLOW 0x100 623 #endif 624 #ifndef AT_REMOVEDIR 625 #define AT_REMOVEDIR 0x200 626 #endif 627 #ifndef MREMAP_FIXED 628 #define MREMAP_FIXED 2 629 #endif 630 #ifndef SA_RESTORER 631 #define SA_RESTORER 0x04000000 632 #endif 633 #ifndef CPUCLOCK_PROF 634 #define CPUCLOCK_PROF 0 635 #endif 636 #ifndef CPUCLOCK_VIRT 637 #define CPUCLOCK_VIRT 1 638 #endif 639 #ifndef CPUCLOCK_SCHED 640 #define CPUCLOCK_SCHED 2 641 #endif 642 #ifndef CPUCLOCK_PERTHREAD_MASK 643 #define CPUCLOCK_PERTHREAD_MASK 4 644 #endif 645 #ifndef MAKE_PROCESS_CPUCLOCK 646 #define MAKE_PROCESS_CPUCLOCK(pid, clock) \ 647 ((~(int)(pid) << 3) | (int)(clock)) 648 #endif 649 #ifndef MAKE_THREAD_CPUCLOCK 650 #define MAKE_THREAD_CPUCLOCK(tid, clock) \ 651 ((~(int)(tid) << 3) | (int)((clock) | CPUCLOCK_PERTHREAD_MASK)) 652 #endif 653 654 #ifndef FUTEX_WAIT 655 #define FUTEX_WAIT 0 656 #endif 657 #ifndef FUTEX_WAKE 658 #define FUTEX_WAKE 1 659 #endif 660 #ifndef FUTEX_FD 661 #define FUTEX_FD 2 662 #endif 663 #ifndef FUTEX_REQUEUE 664 #define FUTEX_REQUEUE 3 665 #endif 666 #ifndef FUTEX_CMP_REQUEUE 667 #define FUTEX_CMP_REQUEUE 4 668 #endif 669 #ifndef FUTEX_WAKE_OP 670 #define FUTEX_WAKE_OP 5 671 #endif 672 #ifndef FUTEX_LOCK_PI 673 #define FUTEX_LOCK_PI 6 674 #endif 675 #ifndef FUTEX_UNLOCK_PI 676 #define FUTEX_UNLOCK_PI 7 677 #endif 678 #ifndef FUTEX_TRYLOCK_PI 679 #define 
FUTEX_TRYLOCK_PI 8 680 #endif 681 #ifndef FUTEX_PRIVATE_FLAG 682 #define FUTEX_PRIVATE_FLAG 128 683 #endif 684 #ifndef FUTEX_CMD_MASK 685 #define FUTEX_CMD_MASK ~FUTEX_PRIVATE_FLAG 686 #endif 687 #ifndef FUTEX_WAIT_PRIVATE 688 #define FUTEX_WAIT_PRIVATE (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) 689 #endif 690 #ifndef FUTEX_WAKE_PRIVATE 691 #define FUTEX_WAKE_PRIVATE (FUTEX_WAKE | FUTEX_PRIVATE_FLAG) 692 #endif 693 #ifndef FUTEX_REQUEUE_PRIVATE 694 #define FUTEX_REQUEUE_PRIVATE (FUTEX_REQUEUE | FUTEX_PRIVATE_FLAG) 695 #endif 696 #ifndef FUTEX_CMP_REQUEUE_PRIVATE 697 #define FUTEX_CMP_REQUEUE_PRIVATE (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG) 698 #endif 699 #ifndef FUTEX_WAKE_OP_PRIVATE 700 #define FUTEX_WAKE_OP_PRIVATE (FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG) 701 #endif 702 #ifndef FUTEX_LOCK_PI_PRIVATE 703 #define FUTEX_LOCK_PI_PRIVATE (FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG) 704 #endif 705 #ifndef FUTEX_UNLOCK_PI_PRIVATE 706 #define FUTEX_UNLOCK_PI_PRIVATE (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG) 707 #endif 708 #ifndef FUTEX_TRYLOCK_PI_PRIVATE 709 #define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG) 710 #endif 711 712 713 #if defined(__x86_64__) 714 #ifndef ARCH_SET_GS 715 #define ARCH_SET_GS 0x1001 716 #endif 717 #ifndef ARCH_GET_GS 718 #define ARCH_GET_GS 0x1004 719 #endif 720 #endif 721 722 #if defined(__i386__) 723 #ifndef __NR_quotactl 724 #define __NR_quotactl 131 725 #endif 726 #ifndef __NR_setresuid 727 #define __NR_setresuid 164 728 #define __NR_getresuid 165 729 #define __NR_setresgid 170 730 #define __NR_getresgid 171 731 #endif 732 #ifndef __NR_rt_sigaction 733 #define __NR_rt_sigreturn 173 734 #define __NR_rt_sigaction 174 735 #define __NR_rt_sigprocmask 175 736 #define __NR_rt_sigpending 176 737 #define __NR_rt_sigsuspend 179 738 #endif 739 #ifndef __NR_pread64 740 #define __NR_pread64 180 741 #endif 742 #ifndef __NR_pwrite64 743 #define __NR_pwrite64 181 744 #endif 745 #ifndef __NR_ugetrlimit 746 #define __NR_ugetrlimit 191 747 #endif 748 #ifndef __NR_stat64 749 #define __NR_stat64 195 750 #endif 751 #ifndef __NR_fstat64 752 #define __NR_fstat64 197 753 #endif 754 #ifndef __NR_setresuid32 755 #define __NR_setresuid32 208 756 #define __NR_getresuid32 209 757 #define __NR_setresgid32 210 758 #define __NR_getresgid32 211 759 #endif 760 #ifndef __NR_setfsuid32 761 #define __NR_setfsuid32 215 762 #define __NR_setfsgid32 216 763 #endif 764 #ifndef __NR_getdents64 765 #define __NR_getdents64 220 766 #endif 767 #ifndef __NR_gettid 768 #define __NR_gettid 224 769 #endif 770 #ifndef __NR_readahead 771 #define __NR_readahead 225 772 #endif 773 #ifndef __NR_setxattr 774 #define __NR_setxattr 226 775 #endif 776 #ifndef __NR_lsetxattr 777 #define __NR_lsetxattr 227 778 #endif 779 #ifndef __NR_getxattr 780 #define __NR_getxattr 229 781 #endif 782 #ifndef __NR_lgetxattr 783 #define __NR_lgetxattr 230 784 #endif 785 #ifndef __NR_listxattr 786 #define __NR_listxattr 232 787 #endif 788 #ifndef __NR_llistxattr 789 #define __NR_llistxattr 233 790 #endif 791 #ifndef __NR_tkill 792 #define __NR_tkill 238 793 #endif 794 #ifndef __NR_futex 795 #define __NR_futex 240 796 #endif 797 #ifndef __NR_sched_setaffinity 798 #define __NR_sched_setaffinity 241 799 #define __NR_sched_getaffinity 242 800 #endif 801 #ifndef __NR_set_tid_address 802 #define __NR_set_tid_address 258 803 #endif 804 #ifndef __NR_clock_gettime 805 #define __NR_clock_gettime 265 806 #endif 807 #ifndef __NR_clock_getres 808 #define __NR_clock_getres 266 809 #endif 810 #ifndef __NR_statfs64 811 #define __NR_statfs64 268 812 #endif 813 
#ifndef __NR_fstatfs64 814 #define __NR_fstatfs64 269 815 #endif 816 #ifndef __NR_fadvise64_64 817 #define __NR_fadvise64_64 272 818 #endif 819 #ifndef __NR_ioprio_set 820 #define __NR_ioprio_set 289 821 #endif 822 #ifndef __NR_ioprio_get 823 #define __NR_ioprio_get 290 824 #endif 825 #ifndef __NR_openat 826 #define __NR_openat 295 827 #endif 828 #ifndef __NR_fstatat64 829 #define __NR_fstatat64 300 830 #endif 831 #ifndef __NR_unlinkat 832 #define __NR_unlinkat 301 833 #endif 834 #ifndef __NR_move_pages 835 #define __NR_move_pages 317 836 #endif 837 #ifndef __NR_getcpu 838 #define __NR_getcpu 318 839 #endif 840 #ifndef __NR_fallocate 841 #define __NR_fallocate 324 842 #endif 843 /* End of i386 definitions */ 844 #elif defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) 845 #ifndef __NR_setresuid 846 #define __NR_setresuid (__NR_SYSCALL_BASE + 164) 847 #define __NR_getresuid (__NR_SYSCALL_BASE + 165) 848 #define __NR_setresgid (__NR_SYSCALL_BASE + 170) 849 #define __NR_getresgid (__NR_SYSCALL_BASE + 171) 850 #endif 851 #ifndef __NR_rt_sigaction 852 #define __NR_rt_sigreturn (__NR_SYSCALL_BASE + 173) 853 #define __NR_rt_sigaction (__NR_SYSCALL_BASE + 174) 854 #define __NR_rt_sigprocmask (__NR_SYSCALL_BASE + 175) 855 #define __NR_rt_sigpending (__NR_SYSCALL_BASE + 176) 856 #define __NR_rt_sigsuspend (__NR_SYSCALL_BASE + 179) 857 #endif 858 #ifndef __NR_pread64 859 #define __NR_pread64 (__NR_SYSCALL_BASE + 180) 860 #endif 861 #ifndef __NR_pwrite64 862 #define __NR_pwrite64 (__NR_SYSCALL_BASE + 181) 863 #endif 864 #ifndef __NR_ugetrlimit 865 #define __NR_ugetrlimit (__NR_SYSCALL_BASE + 191) 866 #endif 867 #ifndef __NR_stat64 868 #define __NR_stat64 (__NR_SYSCALL_BASE + 195) 869 #endif 870 #ifndef __NR_fstat64 871 #define __NR_fstat64 (__NR_SYSCALL_BASE + 197) 872 #endif 873 #ifndef __NR_setresuid32 874 #define __NR_setresuid32 (__NR_SYSCALL_BASE + 208) 875 #define __NR_getresuid32 (__NR_SYSCALL_BASE + 209) 876 #define __NR_setresgid32 (__NR_SYSCALL_BASE + 210) 877 #define __NR_getresgid32 (__NR_SYSCALL_BASE + 211) 878 #endif 879 #ifndef __NR_setfsuid32 880 #define __NR_setfsuid32 (__NR_SYSCALL_BASE + 215) 881 #define __NR_setfsgid32 (__NR_SYSCALL_BASE + 216) 882 #endif 883 #ifndef __NR_getdents64 884 #define __NR_getdents64 (__NR_SYSCALL_BASE + 217) 885 #endif 886 #ifndef __NR_gettid 887 #define __NR_gettid (__NR_SYSCALL_BASE + 224) 888 #endif 889 #ifndef __NR_readahead 890 #define __NR_readahead (__NR_SYSCALL_BASE + 225) 891 #endif 892 #ifndef __NR_setxattr 893 #define __NR_setxattr (__NR_SYSCALL_BASE + 226) 894 #endif 895 #ifndef __NR_lsetxattr 896 #define __NR_lsetxattr (__NR_SYSCALL_BASE + 227) 897 #endif 898 #ifndef __NR_getxattr 899 #define __NR_getxattr (__NR_SYSCALL_BASE + 229) 900 #endif 901 #ifndef __NR_lgetxattr 902 #define __NR_lgetxattr (__NR_SYSCALL_BASE + 230) 903 #endif 904 #ifndef __NR_listxattr 905 #define __NR_listxattr (__NR_SYSCALL_BASE + 232) 906 #endif 907 #ifndef __NR_llistxattr 908 #define __NR_llistxattr (__NR_SYSCALL_BASE + 233) 909 #endif 910 #ifndef __NR_tkill 911 #define __NR_tkill (__NR_SYSCALL_BASE + 238) 912 #endif 913 #ifndef __NR_futex 914 #define __NR_futex (__NR_SYSCALL_BASE + 240) 915 #endif 916 #ifndef __NR_sched_setaffinity 917 #define __NR_sched_setaffinity (__NR_SYSCALL_BASE + 241) 918 #define __NR_sched_getaffinity (__NR_SYSCALL_BASE + 242) 919 #endif 920 #ifndef __NR_set_tid_address 921 #define __NR_set_tid_address (__NR_SYSCALL_BASE + 256) 922 #endif 923 #ifndef __NR_clock_gettime 924 #define __NR_clock_gettime (__NR_SYSCALL_BASE + 263) 925 #endif 926 
#ifndef __NR_clock_getres 927 #define __NR_clock_getres (__NR_SYSCALL_BASE + 264) 928 #endif 929 #ifndef __NR_statfs64 930 #define __NR_statfs64 (__NR_SYSCALL_BASE + 266) 931 #endif 932 #ifndef __NR_fstatfs64 933 #define __NR_fstatfs64 (__NR_SYSCALL_BASE + 267) 934 #endif 935 #ifndef __NR_ioprio_set 936 #define __NR_ioprio_set (__NR_SYSCALL_BASE + 314) 937 #endif 938 #ifndef __NR_ioprio_get 939 #define __NR_ioprio_get (__NR_SYSCALL_BASE + 315) 940 #endif 941 #ifndef __NR_move_pages 942 #define __NR_move_pages (__NR_SYSCALL_BASE + 344) 943 #endif 944 #ifndef __NR_getcpu 945 #define __NR_getcpu (__NR_SYSCALL_BASE + 345) 946 #endif 947 /* End of ARM 3/EABI definitions */ 948 #elif defined(__aarch64__) 949 #ifndef __NR_setxattr 950 #define __NR_setxattr 5 951 #endif 952 #ifndef __NR_lsetxattr 953 #define __NR_lsetxattr 6 954 #endif 955 #ifndef __NR_getxattr 956 #define __NR_getxattr 8 957 #endif 958 #ifndef __NR_lgetxattr 959 #define __NR_lgetxattr 9 960 #endif 961 #ifndef __NR_listxattr 962 #define __NR_listxattr 11 963 #endif 964 #ifndef __NR_llistxattr 965 #define __NR_llistxattr 12 966 #endif 967 #ifndef __NR_ioprio_set 968 #define __NR_ioprio_set 30 969 #endif 970 #ifndef __NR_ioprio_get 971 #define __NR_ioprio_get 31 972 #endif 973 #ifndef __NR_unlinkat 974 #define __NR_unlinkat 35 975 #endif 976 #ifndef __NR_fallocate 977 #define __NR_fallocate 47 978 #endif 979 #ifndef __NR_openat 980 #define __NR_openat 56 981 #endif 982 #ifndef __NR_quotactl 983 #define __NR_quotactl 60 984 #endif 985 #ifndef __NR_getdents64 986 #define __NR_getdents64 61 987 #endif 988 #ifndef __NR_getdents 989 #define __NR_getdents __NR_getdents64 990 #endif 991 #ifndef __NR_pread64 992 #define __NR_pread64 67 993 #endif 994 #ifndef __NR_pwrite64 995 #define __NR_pwrite64 68 996 #endif 997 #ifndef __NR_ppoll 998 #define __NR_ppoll 73 999 #endif 1000 #ifndef __NR_readlinkat 1001 #define __NR_readlinkat 78 1002 #endif 1003 #ifndef __NR_newfstatat 1004 #define __NR_newfstatat 79 1005 #endif 1006 #ifndef __NR_set_tid_address 1007 #define __NR_set_tid_address 96 1008 #endif 1009 #ifndef __NR_futex 1010 #define __NR_futex 98 1011 #endif 1012 #ifndef __NR_clock_gettime 1013 #define __NR_clock_gettime 113 1014 #endif 1015 #ifndef __NR_clock_getres 1016 #define __NR_clock_getres 114 1017 #endif 1018 #ifndef __NR_sched_setaffinity 1019 #define __NR_sched_setaffinity 122 1020 #define __NR_sched_getaffinity 123 1021 #endif 1022 #ifndef __NR_tkill 1023 #define __NR_tkill 130 1024 #endif 1025 #ifndef __NR_setresuid 1026 #define __NR_setresuid 147 1027 #define __NR_getresuid 148 1028 #define __NR_setresgid 149 1029 #define __NR_getresgid 150 1030 #endif 1031 #ifndef __NR_gettid 1032 #define __NR_gettid 178 1033 #endif 1034 #ifndef __NR_readahead 1035 #define __NR_readahead 213 1036 #endif 1037 #ifndef __NR_fadvise64 1038 #define __NR_fadvise64 223 1039 #endif 1040 #ifndef __NR_move_pages 1041 #define __NR_move_pages 239 1042 #endif 1043 /* End of aarch64 definitions */ 1044 #elif defined(__x86_64__) 1045 #ifndef __NR_pread64 1046 #define __NR_pread64 17 1047 #endif 1048 #ifndef __NR_pwrite64 1049 #define __NR_pwrite64 18 1050 #endif 1051 #ifndef __NR_setresuid 1052 #define __NR_setresuid 117 1053 #define __NR_getresuid 118 1054 #define __NR_setresgid 119 1055 #define __NR_getresgid 120 1056 #endif 1057 #ifndef __NR_quotactl 1058 #define __NR_quotactl 179 1059 #endif 1060 #ifndef __NR_gettid 1061 #define __NR_gettid 186 1062 #endif 1063 #ifndef __NR_readahead 1064 #define __NR_readahead 187 1065 #endif 1066 #ifndef __NR_setxattr 
1067 #define __NR_setxattr 188 1068 #endif 1069 #ifndef __NR_lsetxattr 1070 #define __NR_lsetxattr 189 1071 #endif 1072 #ifndef __NR_getxattr 1073 #define __NR_getxattr 191 1074 #endif 1075 #ifndef __NR_lgetxattr 1076 #define __NR_lgetxattr 192 1077 #endif 1078 #ifndef __NR_listxattr 1079 #define __NR_listxattr 194 1080 #endif 1081 #ifndef __NR_llistxattr 1082 #define __NR_llistxattr 195 1083 #endif 1084 #ifndef __NR_tkill 1085 #define __NR_tkill 200 1086 #endif 1087 #ifndef __NR_futex 1088 #define __NR_futex 202 1089 #endif 1090 #ifndef __NR_sched_setaffinity 1091 #define __NR_sched_setaffinity 203 1092 #define __NR_sched_getaffinity 204 1093 #endif 1094 #ifndef __NR_getdents64 1095 #define __NR_getdents64 217 1096 #endif 1097 #ifndef __NR_set_tid_address 1098 #define __NR_set_tid_address 218 1099 #endif 1100 #ifndef __NR_fadvise64 1101 #define __NR_fadvise64 221 1102 #endif 1103 #ifndef __NR_clock_gettime 1104 #define __NR_clock_gettime 228 1105 #endif 1106 #ifndef __NR_clock_getres 1107 #define __NR_clock_getres 229 1108 #endif 1109 #ifndef __NR_ioprio_set 1110 #define __NR_ioprio_set 251 1111 #endif 1112 #ifndef __NR_ioprio_get 1113 #define __NR_ioprio_get 252 1114 #endif 1115 #ifndef __NR_openat 1116 #define __NR_openat 257 1117 #endif 1118 #ifndef __NR_newfstatat 1119 #define __NR_newfstatat 262 1120 #endif 1121 #ifndef __NR_unlinkat 1122 #define __NR_unlinkat 263 1123 #endif 1124 #ifndef __NR_move_pages 1125 #define __NR_move_pages 279 1126 #endif 1127 #ifndef __NR_fallocate 1128 #define __NR_fallocate 285 1129 #endif 1130 /* End of x86-64 definitions */ 1131 #elif defined(__mips__) 1132 #if _MIPS_SIM == _MIPS_SIM_ABI32 1133 #ifndef __NR_setresuid 1134 #define __NR_setresuid (__NR_Linux + 185) 1135 #define __NR_getresuid (__NR_Linux + 186) 1136 #define __NR_setresgid (__NR_Linux + 190) 1137 #define __NR_getresgid (__NR_Linux + 191) 1138 #endif 1139 #ifndef __NR_rt_sigaction 1140 #define __NR_rt_sigreturn (__NR_Linux + 193) 1141 #define __NR_rt_sigaction (__NR_Linux + 194) 1142 #define __NR_rt_sigprocmask (__NR_Linux + 195) 1143 #define __NR_rt_sigpending (__NR_Linux + 196) 1144 #define __NR_rt_sigsuspend (__NR_Linux + 199) 1145 #endif 1146 #ifndef __NR_pread64 1147 #define __NR_pread64 (__NR_Linux + 200) 1148 #endif 1149 #ifndef __NR_pwrite64 1150 #define __NR_pwrite64 (__NR_Linux + 201) 1151 #endif 1152 #ifndef __NR_stat64 1153 #define __NR_stat64 (__NR_Linux + 213) 1154 #endif 1155 #ifndef __NR_fstat64 1156 #define __NR_fstat64 (__NR_Linux + 215) 1157 #endif 1158 #ifndef __NR_getdents64 1159 #define __NR_getdents64 (__NR_Linux + 219) 1160 #endif 1161 #ifndef __NR_gettid 1162 #define __NR_gettid (__NR_Linux + 222) 1163 #endif 1164 #ifndef __NR_readahead 1165 #define __NR_readahead (__NR_Linux + 223) 1166 #endif 1167 #ifndef __NR_setxattr 1168 #define __NR_setxattr (__NR_Linux + 224) 1169 #endif 1170 #ifndef __NR_lsetxattr 1171 #define __NR_lsetxattr (__NR_Linux + 225) 1172 #endif 1173 #ifndef __NR_getxattr 1174 #define __NR_getxattr (__NR_Linux + 227) 1175 #endif 1176 #ifndef __NR_lgetxattr 1177 #define __NR_lgetxattr (__NR_Linux + 228) 1178 #endif 1179 #ifndef __NR_listxattr 1180 #define __NR_listxattr (__NR_Linux + 230) 1181 #endif 1182 #ifndef __NR_llistxattr 1183 #define __NR_llistxattr (__NR_Linux + 231) 1184 #endif 1185 #ifndef __NR_tkill 1186 #define __NR_tkill (__NR_Linux + 236) 1187 #endif 1188 #ifndef __NR_futex 1189 #define __NR_futex (__NR_Linux + 238) 1190 #endif 1191 #ifndef __NR_sched_setaffinity 1192 #define __NR_sched_setaffinity (__NR_Linux + 239) 1193 #define 
__NR_sched_getaffinity (__NR_Linux + 240) 1194 #endif 1195 #ifndef __NR_set_tid_address 1196 #define __NR_set_tid_address (__NR_Linux + 252) 1197 #endif 1198 #ifndef __NR_statfs64 1199 #define __NR_statfs64 (__NR_Linux + 255) 1200 #endif 1201 #ifndef __NR_fstatfs64 1202 #define __NR_fstatfs64 (__NR_Linux + 256) 1203 #endif 1204 #ifndef __NR_clock_gettime 1205 #define __NR_clock_gettime (__NR_Linux + 263) 1206 #endif 1207 #ifndef __NR_clock_getres 1208 #define __NR_clock_getres (__NR_Linux + 264) 1209 #endif 1210 #ifndef __NR_openat 1211 #define __NR_openat (__NR_Linux + 288) 1212 #endif 1213 #ifndef __NR_fstatat 1214 #define __NR_fstatat (__NR_Linux + 293) 1215 #endif 1216 #ifndef __NR_unlinkat 1217 #define __NR_unlinkat (__NR_Linux + 294) 1218 #endif 1219 #ifndef __NR_move_pages 1220 #define __NR_move_pages (__NR_Linux + 308) 1221 #endif 1222 #ifndef __NR_getcpu 1223 #define __NR_getcpu (__NR_Linux + 312) 1224 #endif 1225 #ifndef __NR_ioprio_set 1226 #define __NR_ioprio_set (__NR_Linux + 314) 1227 #endif 1228 #ifndef __NR_ioprio_get 1229 #define __NR_ioprio_get (__NR_Linux + 315) 1230 #endif 1231 /* End of MIPS (old 32bit API) definitions */ 1232 #elif _MIPS_SIM == _MIPS_SIM_ABI64 1233 #ifndef __NR_pread64 1234 #define __NR_pread64 (__NR_Linux + 16) 1235 #endif 1236 #ifndef __NR_pwrite64 1237 #define __NR_pwrite64 (__NR_Linux + 17) 1238 #endif 1239 #ifndef __NR_setresuid 1240 #define __NR_setresuid (__NR_Linux + 115) 1241 #define __NR_getresuid (__NR_Linux + 116) 1242 #define __NR_setresgid (__NR_Linux + 117) 1243 #define __NR_getresgid (__NR_Linux + 118) 1244 #endif 1245 #ifndef __NR_gettid 1246 #define __NR_gettid (__NR_Linux + 178) 1247 #endif 1248 #ifndef __NR_readahead 1249 #define __NR_readahead (__NR_Linux + 179) 1250 #endif 1251 #ifndef __NR_setxattr 1252 #define __NR_setxattr (__NR_Linux + 180) 1253 #endif 1254 #ifndef __NR_lsetxattr 1255 #define __NR_lsetxattr (__NR_Linux + 181) 1256 #endif 1257 #ifndef __NR_getxattr 1258 #define __NR_getxattr (__NR_Linux + 183) 1259 #endif 1260 #ifndef __NR_lgetxattr 1261 #define __NR_lgetxattr (__NR_Linux + 184) 1262 #endif 1263 #ifndef __NR_listxattr 1264 #define __NR_listxattr (__NR_Linux + 186) 1265 #endif 1266 #ifndef __NR_llistxattr 1267 #define __NR_llistxattr (__NR_Linux + 187) 1268 #endif 1269 #ifndef __NR_tkill 1270 #define __NR_tkill (__NR_Linux + 192) 1271 #endif 1272 #ifndef __NR_futex 1273 #define __NR_futex (__NR_Linux + 194) 1274 #endif 1275 #ifndef __NR_sched_setaffinity 1276 #define __NR_sched_setaffinity (__NR_Linux + 195) 1277 #define __NR_sched_getaffinity (__NR_Linux + 196) 1278 #endif 1279 #ifndef __NR_set_tid_address 1280 #define __NR_set_tid_address (__NR_Linux + 212) 1281 #endif 1282 #ifndef __NR_clock_gettime 1283 #define __NR_clock_gettime (__NR_Linux + 222) 1284 #endif 1285 #ifndef __NR_clock_getres 1286 #define __NR_clock_getres (__NR_Linux + 223) 1287 #endif 1288 #ifndef __NR_openat 1289 #define __NR_openat (__NR_Linux + 247) 1290 #endif 1291 #ifndef __NR_fstatat 1292 #define __NR_fstatat (__NR_Linux + 252) 1293 #endif 1294 #ifndef __NR_unlinkat 1295 #define __NR_unlinkat (__NR_Linux + 253) 1296 #endif 1297 #ifndef __NR_move_pages 1298 #define __NR_move_pages (__NR_Linux + 267) 1299 #endif 1300 #ifndef __NR_getcpu 1301 #define __NR_getcpu (__NR_Linux + 271) 1302 #endif 1303 #ifndef __NR_ioprio_set 1304 #define __NR_ioprio_set (__NR_Linux + 273) 1305 #endif 1306 #ifndef __NR_ioprio_get 1307 #define __NR_ioprio_get (__NR_Linux + 274) 1308 #endif 1309 /* End of MIPS (64bit API) definitions */ 1310 #else 1311 #ifndef 
__NR_setresuid 1312 #define __NR_setresuid (__NR_Linux + 115) 1313 #define __NR_getresuid (__NR_Linux + 116) 1314 #define __NR_setresgid (__NR_Linux + 117) 1315 #define __NR_getresgid (__NR_Linux + 118) 1316 #endif 1317 #ifndef __NR_gettid 1318 #define __NR_gettid (__NR_Linux + 178) 1319 #endif 1320 #ifndef __NR_readahead 1321 #define __NR_readahead (__NR_Linux + 179) 1322 #endif 1323 #ifndef __NR_setxattr 1324 #define __NR_setxattr (__NR_Linux + 180) 1325 #endif 1326 #ifndef __NR_lsetxattr 1327 #define __NR_lsetxattr (__NR_Linux + 181) 1328 #endif 1329 #ifndef __NR_getxattr 1330 #define __NR_getxattr (__NR_Linux + 183) 1331 #endif 1332 #ifndef __NR_lgetxattr 1333 #define __NR_lgetxattr (__NR_Linux + 184) 1334 #endif 1335 #ifndef __NR_listxattr 1336 #define __NR_listxattr (__NR_Linux + 186) 1337 #endif 1338 #ifndef __NR_llistxattr 1339 #define __NR_llistxattr (__NR_Linux + 187) 1340 #endif 1341 #ifndef __NR_tkill 1342 #define __NR_tkill (__NR_Linux + 192) 1343 #endif 1344 #ifndef __NR_futex 1345 #define __NR_futex (__NR_Linux + 194) 1346 #endif 1347 #ifndef __NR_sched_setaffinity 1348 #define __NR_sched_setaffinity (__NR_Linux + 195) 1349 #define __NR_sched_getaffinity (__NR_Linux + 196) 1350 #endif 1351 #ifndef __NR_set_tid_address 1352 #define __NR_set_tid_address (__NR_Linux + 213) 1353 #endif 1354 #ifndef __NR_statfs64 1355 #define __NR_statfs64 (__NR_Linux + 217) 1356 #endif 1357 #ifndef __NR_fstatfs64 1358 #define __NR_fstatfs64 (__NR_Linux + 218) 1359 #endif 1360 #ifndef __NR_clock_gettime 1361 #define __NR_clock_gettime (__NR_Linux + 226) 1362 #endif 1363 #ifndef __NR_clock_getres 1364 #define __NR_clock_getres (__NR_Linux + 227) 1365 #endif 1366 #ifndef __NR_openat 1367 #define __NR_openat (__NR_Linux + 251) 1368 #endif 1369 #ifndef __NR_fstatat 1370 #define __NR_fstatat (__NR_Linux + 256) 1371 #endif 1372 #ifndef __NR_unlinkat 1373 #define __NR_unlinkat (__NR_Linux + 257) 1374 #endif 1375 #ifndef __NR_move_pages 1376 #define __NR_move_pages (__NR_Linux + 271) 1377 #endif 1378 #ifndef __NR_getcpu 1379 #define __NR_getcpu (__NR_Linux + 275) 1380 #endif 1381 #ifndef __NR_ioprio_set 1382 #define __NR_ioprio_set (__NR_Linux + 277) 1383 #endif 1384 #ifndef __NR_ioprio_get 1385 #define __NR_ioprio_get (__NR_Linux + 278) 1386 #endif 1387 /* End of MIPS (new 32bit API) definitions */ 1388 #endif 1389 /* End of MIPS definitions */ 1390 #elif defined(__PPC__) 1391 #ifndef __NR_setfsuid 1392 #define __NR_setfsuid 138 1393 #define __NR_setfsgid 139 1394 #endif 1395 #ifndef __NR_setresuid 1396 #define __NR_setresuid 164 1397 #define __NR_getresuid 165 1398 #define __NR_setresgid 169 1399 #define __NR_getresgid 170 1400 #endif 1401 #ifndef __NR_rt_sigaction 1402 #define __NR_rt_sigreturn 172 1403 #define __NR_rt_sigaction 173 1404 #define __NR_rt_sigprocmask 174 1405 #define __NR_rt_sigpending 175 1406 #define __NR_rt_sigsuspend 178 1407 #endif 1408 #ifndef __NR_pread64 1409 #define __NR_pread64 179 1410 #endif 1411 #ifndef __NR_pwrite64 1412 #define __NR_pwrite64 180 1413 #endif 1414 #ifndef __NR_ugetrlimit 1415 #define __NR_ugetrlimit 190 1416 #endif 1417 #ifndef __NR_readahead 1418 #define __NR_readahead 191 1419 #endif 1420 #ifndef __NR_stat64 1421 #define __NR_stat64 195 1422 #endif 1423 #ifndef __NR_fstat64 1424 #define __NR_fstat64 197 1425 #endif 1426 #ifndef __NR_getdents64 1427 #define __NR_getdents64 202 1428 #endif 1429 #ifndef __NR_gettid 1430 #define __NR_gettid 207 1431 #endif 1432 #ifndef __NR_tkill 1433 #define __NR_tkill 208 1434 #endif 1435 #ifndef __NR_setxattr 1436 #define 
__NR_setxattr 209 1437 #endif 1438 #ifndef __NR_lsetxattr 1439 #define __NR_lsetxattr 210 1440 #endif 1441 #ifndef __NR_getxattr 1442 #define __NR_getxattr 212 1443 #endif 1444 #ifndef __NR_lgetxattr 1445 #define __NR_lgetxattr 213 1446 #endif 1447 #ifndef __NR_listxattr 1448 #define __NR_listxattr 215 1449 #endif 1450 #ifndef __NR_llistxattr 1451 #define __NR_llistxattr 216 1452 #endif 1453 #ifndef __NR_futex 1454 #define __NR_futex 221 1455 #endif 1456 #ifndef __NR_sched_setaffinity 1457 #define __NR_sched_setaffinity 222 1458 #define __NR_sched_getaffinity 223 1459 #endif 1460 #ifndef __NR_set_tid_address 1461 #define __NR_set_tid_address 232 1462 #endif 1463 #ifndef __NR_clock_gettime 1464 #define __NR_clock_gettime 246 1465 #endif 1466 #ifndef __NR_clock_getres 1467 #define __NR_clock_getres 247 1468 #endif 1469 #ifndef __NR_statfs64 1470 #define __NR_statfs64 252 1471 #endif 1472 #ifndef __NR_fstatfs64 1473 #define __NR_fstatfs64 253 1474 #endif 1475 #ifndef __NR_fadvise64_64 1476 #define __NR_fadvise64_64 254 1477 #endif 1478 #ifndef __NR_ioprio_set 1479 #define __NR_ioprio_set 273 1480 #endif 1481 #ifndef __NR_ioprio_get 1482 #define __NR_ioprio_get 274 1483 #endif 1484 #ifndef __NR_openat 1485 #define __NR_openat 286 1486 #endif 1487 #ifndef __NR_fstatat64 1488 #define __NR_fstatat64 291 1489 #endif 1490 #ifndef __NR_unlinkat 1491 #define __NR_unlinkat 292 1492 #endif 1493 #ifndef __NR_move_pages 1494 #define __NR_move_pages 301 1495 #endif 1496 #ifndef __NR_getcpu 1497 #define __NR_getcpu 302 1498 #endif 1499 /* End of powerpc defininitions */ 1500 #endif 1501 1502 1503 /* After forking, we must make sure to only call system calls. */ 1504 #if defined(__BOUNDED_POINTERS__) 1505 #error "Need to port invocations of syscalls for bounded ptrs" 1506 #else 1507 /* The core dumper and the thread lister get executed after threads 1508 * have been suspended. As a consequence, we cannot call any functions 1509 * that acquire locks. Unfortunately, libc wraps most system calls 1510 * (e.g. in order to implement pthread_atfork, and to make calls 1511 * cancellable), which means we cannot call these functions. Instead, 1512 * we have to call syscall() directly. 1513 */ 1514 #undef LSS_ERRNO 1515 #ifdef SYS_ERRNO 1516 /* Allow the including file to override the location of errno. This can 1517 * be useful when using clone() with the CLONE_VM option. 1518 */ 1519 #define LSS_ERRNO SYS_ERRNO 1520 #else 1521 #define LSS_ERRNO errno 1522 #endif 1523 1524 #undef LSS_INLINE 1525 #ifdef SYS_INLINE 1526 #define LSS_INLINE SYS_INLINE 1527 #else 1528 #define LSS_INLINE static inline 1529 #endif 1530 1531 /* Allow the including file to override the prefix used for all new 1532 * system calls. By default, it will be set to "sys_". 
1533 */ 1534 #undef LSS_NAME 1535 #ifndef SYS_PREFIX 1536 #define LSS_NAME(name) sys_##name 1537 #elif defined(SYS_PREFIX) && SYS_PREFIX < 0 1538 #define LSS_NAME(name) name 1539 #elif defined(SYS_PREFIX) && SYS_PREFIX == 0 1540 #define LSS_NAME(name) sys0_##name 1541 #elif defined(SYS_PREFIX) && SYS_PREFIX == 1 1542 #define LSS_NAME(name) sys1_##name 1543 #elif defined(SYS_PREFIX) && SYS_PREFIX == 2 1544 #define LSS_NAME(name) sys2_##name 1545 #elif defined(SYS_PREFIX) && SYS_PREFIX == 3 1546 #define LSS_NAME(name) sys3_##name 1547 #elif defined(SYS_PREFIX) && SYS_PREFIX == 4 1548 #define LSS_NAME(name) sys4_##name 1549 #elif defined(SYS_PREFIX) && SYS_PREFIX == 5 1550 #define LSS_NAME(name) sys5_##name 1551 #elif defined(SYS_PREFIX) && SYS_PREFIX == 6 1552 #define LSS_NAME(name) sys6_##name 1553 #elif defined(SYS_PREFIX) && SYS_PREFIX == 7 1554 #define LSS_NAME(name) sys7_##name 1555 #elif defined(SYS_PREFIX) && SYS_PREFIX == 8 1556 #define LSS_NAME(name) sys8_##name 1557 #elif defined(SYS_PREFIX) && SYS_PREFIX == 9 1558 #define LSS_NAME(name) sys9_##name 1559 #endif 1560 1561 #undef LSS_RETURN 1562 #if (defined(__i386__) || defined(__x86_64__) || defined(__ARM_ARCH_3__) \ 1563 || defined(__ARM_EABI__) || defined(__aarch64__)) 1564 /* Failing system calls return a negative result in the range of 1565 * -1..-4095. These are "errno" values with the sign inverted. 1566 */ 1567 #define LSS_RETURN(type, res) \ 1568 do { \ 1569 if ((unsigned long)(res) >= (unsigned long)(-4095)) { \ 1570 LSS_ERRNO = -(res); \ 1571 res = -1; \ 1572 } \ 1573 return (type) (res); \ 1574 } while (0) 1575 #elif defined(__mips__) 1576 /* On MIPS, failing system calls return -1, and set errno in a 1577 * separate CPU register. 1578 */ 1579 #define LSS_RETURN(type, res, err) \ 1580 do { \ 1581 if (err) { \ 1582 unsigned long __errnovalue = (res); \ 1583 LSS_ERRNO = __errnovalue; \ 1584 res = -1; \ 1585 } \ 1586 return (type) (res); \ 1587 } while (0) 1588 #elif defined(__PPC__) 1589 /* On PPC, failing system calls return -1, and set errno in a 1590 * separate CPU register. See linux/unistd.h. 1591 */ 1592 #define LSS_RETURN(type, res, err) \ 1593 do { \ 1594 if (err & 0x10000000 ) { \ 1595 LSS_ERRNO = (res); \ 1596 res = -1; \ 1597 } \ 1598 return (type) (res); \ 1599 } while (0) 1600 #endif 1601 #if defined(__i386__) 1602 /* In PIC mode (e.g. when building shared libraries), gcc for i386 1603 * reserves ebx. Unfortunately, most distribution ship with implementations 1604 * of _syscallX() which clobber ebx. 1605 * Also, most definitions of _syscallX() neglect to mark "memory" as being 1606 * clobbered. This causes problems with compilers, that do a better job 1607 * at optimizing across __asm__ calls. 1608 * So, we just have to redefine all of the _syscallX() macros. 
1609 */ 1610 #undef LSS_ENTRYPOINT 1611 #ifdef SYS_SYSCALL_ENTRYPOINT 1612 static inline void (**LSS_NAME(get_syscall_entrypoint)(void))(void) { 1613 void (**entrypoint)(void); 1614 asm volatile(".bss\n" 1615 ".align 8\n" 1616 ".globl " SYS_SYSCALL_ENTRYPOINT "\n" 1617 ".common " SYS_SYSCALL_ENTRYPOINT ",8,8\n" 1618 ".previous\n" 1619 /* This logically does 'lea "SYS_SYSCALL_ENTRYPOINT", %0' */ 1620 "call 0f\n" 1621 "0:pop %0\n" 1622 "add $_GLOBAL_OFFSET_TABLE_+[.-0b], %0\n" 1623 "mov " SYS_SYSCALL_ENTRYPOINT "@GOT(%0), %0\n" 1624 : "=r"(entrypoint)); 1625 return entrypoint; 1626 } 1627 1628 #define LSS_ENTRYPOINT ".bss\n" \ 1629 ".align 8\n" \ 1630 ".globl " SYS_SYSCALL_ENTRYPOINT "\n" \ 1631 ".common " SYS_SYSCALL_ENTRYPOINT ",8,8\n" \ 1632 ".previous\n" \ 1633 /* Check the SYS_SYSCALL_ENTRYPOINT vector */ \ 1634 "push %%eax\n" \ 1635 "call 10000f\n" \ 1636 "10000:pop %%eax\n" \ 1637 "add $_GLOBAL_OFFSET_TABLE_+[.-10000b], %%eax\n" \ 1638 "mov " SYS_SYSCALL_ENTRYPOINT \ 1639 "@GOT(%%eax), %%eax\n" \ 1640 "mov 0(%%eax), %%eax\n" \ 1641 "test %%eax, %%eax\n" \ 1642 "jz 10002f\n" \ 1643 "push %%eax\n" \ 1644 "call 10001f\n" \ 1645 "10001:pop %%eax\n" \ 1646 "add $(10003f-10001b), %%eax\n" \ 1647 "xchg 4(%%esp), %%eax\n" \ 1648 "ret\n" \ 1649 "10002:pop %%eax\n" \ 1650 "int $0x80\n" \ 1651 "10003:\n" 1652 #else 1653 #define LSS_ENTRYPOINT "int $0x80\n" 1654 #endif 1655 #undef LSS_BODY 1656 #define LSS_BODY(type,args...) \ 1657 long __res; \ 1658 __asm__ __volatile__("push %%ebx\n" \ 1659 "movl %2,%%ebx\n" \ 1660 LSS_ENTRYPOINT \ 1661 "pop %%ebx" \ 1662 args \ 1663 : "esp", "memory"); \ 1664 LSS_RETURN(type,__res) 1665 #undef _syscall0 1666 #define _syscall0(type,name) \ 1667 type LSS_NAME(name)(void) { \ 1668 long __res; \ 1669 __asm__ volatile(LSS_ENTRYPOINT \ 1670 : "=a" (__res) \ 1671 : "0" (__NR_##name) \ 1672 : "esp", "memory"); \ 1673 LSS_RETURN(type,__res); \ 1674 } 1675 #undef _syscall1 1676 #define _syscall1(type,name,type1,arg1) \ 1677 type LSS_NAME(name)(type1 arg1) { \ 1678 LSS_BODY(type, \ 1679 : "=a" (__res) \ 1680 : "0" (__NR_##name), "ri" ((long)(arg1))); \ 1681 } 1682 #undef _syscall2 1683 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 1684 type LSS_NAME(name)(type1 arg1,type2 arg2) { \ 1685 LSS_BODY(type, \ 1686 : "=a" (__res) \ 1687 : "0" (__NR_##name),"ri" ((long)(arg1)), "c" ((long)(arg2))); \ 1688 } 1689 #undef _syscall3 1690 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 1691 type LSS_NAME(name)(type1 arg1,type2 arg2,type3 arg3) { \ 1692 LSS_BODY(type, \ 1693 : "=a" (__res) \ 1694 : "0" (__NR_##name), "ri" ((long)(arg1)), "c" ((long)(arg2)), \ 1695 "d" ((long)(arg3))); \ 1696 } 1697 #undef _syscall4 1698 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 1699 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ 1700 LSS_BODY(type, \ 1701 : "=a" (__res) \ 1702 : "0" (__NR_##name), "ri" ((long)(arg1)), "c" ((long)(arg2)), \ 1703 "d" ((long)(arg3)),"S" ((long)(arg4))); \ 1704 } 1705 #undef _syscall5 1706 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 1707 type5,arg5) \ 1708 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 1709 type5 arg5) { \ 1710 long __res; \ 1711 __asm__ __volatile__("push %%ebx\n" \ 1712 "movl %2,%%ebx\n" \ 1713 "movl %1,%%eax\n" \ 1714 LSS_ENTRYPOINT \ 1715 "pop %%ebx" \ 1716 : "=a" (__res) \ 1717 : "i" (__NR_##name), "ri" ((long)(arg1)), \ 1718 "c" ((long)(arg2)), "d" ((long)(arg3)), \ 1719 "S" ((long)(arg4)), "D" ((long)(arg5)) \ 1720 
: "esp", "memory"); \ 1721 LSS_RETURN(type,__res); \ 1722 } 1723 #undef _syscall6 1724 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 1725 type5,arg5,type6,arg6) \ 1726 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 1727 type5 arg5, type6 arg6) { \ 1728 long __res; \ 1729 struct { long __a1; long __a6; } __s = { (long)arg1, (long) arg6 }; \ 1730 __asm__ __volatile__("push %%ebp\n" \ 1731 "push %%ebx\n" \ 1732 "movl 4(%2),%%ebp\n" \ 1733 "movl 0(%2), %%ebx\n" \ 1734 "movl %1,%%eax\n" \ 1735 LSS_ENTRYPOINT \ 1736 "pop %%ebx\n" \ 1737 "pop %%ebp" \ 1738 : "=a" (__res) \ 1739 : "i" (__NR_##name), "0" ((long)(&__s)), \ 1740 "c" ((long)(arg2)), "d" ((long)(arg3)), \ 1741 "S" ((long)(arg4)), "D" ((long)(arg5)) \ 1742 : "esp", "memory"); \ 1743 LSS_RETURN(type,__res); \ 1744 } 1745 LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack, 1746 int flags, void *arg, int *parent_tidptr, 1747 void *newtls, int *child_tidptr) { 1748 long __res; 1749 __asm__ __volatile__(/* if (fn == NULL) 1750 * return -EINVAL; 1751 */ 1752 "movl %3,%%ecx\n" 1753 "jecxz 1f\n" 1754 1755 /* if (child_stack == NULL) 1756 * return -EINVAL; 1757 */ 1758 "movl %4,%%ecx\n" 1759 "jecxz 1f\n" 1760 1761 /* Set up alignment of the child stack: 1762 * child_stack = (child_stack & ~0xF) - 20; 1763 */ 1764 "andl $-16,%%ecx\n" 1765 "subl $20,%%ecx\n" 1766 1767 /* Push "arg" and "fn" onto the stack that will be 1768 * used by the child. 1769 */ 1770 "movl %6,%%eax\n" 1771 "movl %%eax,4(%%ecx)\n" 1772 "movl %3,%%eax\n" 1773 "movl %%eax,(%%ecx)\n" 1774 1775 /* %eax = syscall(%eax = __NR_clone, 1776 * %ebx = flags, 1777 * %ecx = child_stack, 1778 * %edx = parent_tidptr, 1779 * %esi = newtls, 1780 * %edi = child_tidptr) 1781 * Also, make sure that %ebx gets preserved as it is 1782 * used in PIC mode. 1783 */ 1784 "movl %8,%%esi\n" 1785 "movl %7,%%edx\n" 1786 "movl %5,%%eax\n" 1787 "movl %9,%%edi\n" 1788 "pushl %%ebx\n" 1789 "movl %%eax,%%ebx\n" 1790 "movl %2,%%eax\n" 1791 LSS_ENTRYPOINT 1792 1793 /* In the parent: restore %ebx 1794 * In the child: move "fn" into %ebx 1795 */ 1796 "popl %%ebx\n" 1797 1798 /* if (%eax != 0) 1799 * return %eax; 1800 */ 1801 "test %%eax,%%eax\n" 1802 "jnz 1f\n" 1803 1804 /* In the child, now. Terminate frame pointer chain. 1805 */ 1806 "movl $0,%%ebp\n" 1807 1808 /* Call "fn". "arg" is already on the stack. 1809 */ 1810 "call *%%ebx\n" 1811 1812 /* Call _exit(%ebx). Unfortunately older versions 1813 * of gcc restrict the number of arguments that can 1814 * be passed to asm(). So, we need to hard-code the 1815 * system call number. 1816 */ 1817 "movl %%eax,%%ebx\n" 1818 "movl $1,%%eax\n" 1819 LSS_ENTRYPOINT 1820 1821 /* Return to parent. 
1822 */ 1823 "1:\n" 1824 : "=a" (__res) 1825 : "0"(-EINVAL), "i"(__NR_clone), 1826 "m"(fn), "m"(child_stack), "m"(flags), "m"(arg), 1827 "m"(parent_tidptr), "m"(newtls), "m"(child_tidptr) 1828 : "esp", "memory", "ecx", "edx", "esi", "edi"); 1829 LSS_RETURN(int, __res); 1830 } 1831 1832 #define __NR__fadvise64_64 __NR_fadvise64_64 1833 LSS_INLINE _syscall6(int, _fadvise64_64, int, fd, 1834 unsigned, offset_lo, unsigned, offset_hi, 1835 unsigned, len_lo, unsigned, len_hi, 1836 int, advice) 1837 1838 LSS_INLINE int LSS_NAME(fadvise64)(int fd, loff_t offset, 1839 loff_t len, int advice) { 1840 return LSS_NAME(_fadvise64_64)(fd, 1841 (unsigned)offset, (unsigned)(offset >>32), 1842 (unsigned)len, (unsigned)(len >> 32), 1843 advice); 1844 } 1845 1846 #define __NR__fallocate __NR_fallocate 1847 LSS_INLINE _syscall6(int, _fallocate, int, fd, 1848 int, mode, 1849 unsigned, offset_lo, unsigned, offset_hi, 1850 unsigned, len_lo, unsigned, len_hi) 1851 1852 LSS_INLINE int LSS_NAME(fallocate)(int fd, int mode, 1853 loff_t offset, loff_t len) { 1854 union { loff_t off; unsigned w[2]; } o = { offset }, l = { len }; 1855 return LSS_NAME(_fallocate)(fd, mode, o.w[0], o.w[1], l.w[0], l.w[1]); 1856 } 1857 1858 LSS_INLINE _syscall1(int, set_thread_area, void *, u) 1859 LSS_INLINE _syscall1(int, get_thread_area, void *, u) 1860 1861 LSS_INLINE void (*LSS_NAME(restore_rt)(void))(void) { 1862 /* On i386, the kernel does not know how to return from a signal 1863 * handler. Instead, it relies on user space to provide a 1864 * restorer function that calls the {rt_,}sigreturn() system call. 1865 * Unfortunately, we cannot just reference the glibc version of this 1866 * function, as glibc goes out of its way to make it inaccessible. 1867 */ 1868 void (*res)(void); 1869 __asm__ __volatile__("call 2f\n" 1870 "0:.align 16\n" 1871 "1:movl %1,%%eax\n" 1872 LSS_ENTRYPOINT 1873 "2:popl %0\n" 1874 "addl $(1b-0b),%0\n" 1875 : "=a" (res) 1876 : "i" (__NR_rt_sigreturn)); 1877 return res; 1878 } 1879 LSS_INLINE void (*LSS_NAME(restore)(void))(void) { 1880 /* On i386, the kernel does not know how to return from a signal 1881 * handler. Instead, it relies on user space to provide a 1882 * restorer function that calls the {rt_,}sigreturn() system call. 1883 * Unfortunately, we cannot just reference the glibc version of this 1884 * function, as glibc goes out of its way to make it inaccessible. 1885 */ 1886 void (*res)(void); 1887 __asm__ __volatile__("call 2f\n" 1888 "0:.align 16\n" 1889 "1:pop %%eax\n" 1890 "movl %1,%%eax\n" 1891 LSS_ENTRYPOINT 1892 "2:popl %0\n" 1893 "addl $(1b-0b),%0\n" 1894 : "=a" (res) 1895 : "i" (__NR_sigreturn)); 1896 return res; 1897 } 1898 #elif defined(__x86_64__) 1899 /* There are no known problems with any of the _syscallX() macros 1900 * currently shipping for x86_64, but we still need to be able to define 1901 * our own version so that we can override the location of the errno 1902 * location (e.g. when using the clone() system call with the CLONE_VM 1903 * option). 
1904 */ 1905 #undef LSS_ENTRYPOINT 1906 #ifdef SYS_SYSCALL_ENTRYPOINT 1907 static inline void (**LSS_NAME(get_syscall_entrypoint)(void))(void) { 1908 void (**entrypoint)(void); 1909 asm volatile(".bss\n" 1910 ".align 8\n" 1911 ".globl " SYS_SYSCALL_ENTRYPOINT "\n" 1912 ".common " SYS_SYSCALL_ENTRYPOINT ",8,8\n" 1913 ".previous\n" 1914 "mov " SYS_SYSCALL_ENTRYPOINT "@GOTPCREL(%%rip), %0\n" 1915 : "=r"(entrypoint)); 1916 return entrypoint; 1917 } 1918 1919 #define LSS_ENTRYPOINT \ 1920 ".bss\n" \ 1921 ".align 8\n" \ 1922 ".globl " SYS_SYSCALL_ENTRYPOINT "\n" \ 1923 ".common " SYS_SYSCALL_ENTRYPOINT ",8,8\n" \ 1924 ".previous\n" \ 1925 "mov " SYS_SYSCALL_ENTRYPOINT "@GOTPCREL(%%rip), %%rcx\n" \ 1926 "mov 0(%%rcx), %%rcx\n" \ 1927 "test %%rcx, %%rcx\n" \ 1928 "jz 10001f\n" \ 1929 "call *%%rcx\n" \ 1930 "jmp 10002f\n" \ 1931 "10001:syscall\n" \ 1932 "10002:\n" 1933 1934 #else 1935 #define LSS_ENTRYPOINT "syscall\n" 1936 #endif 1937 1938 /* The x32 ABI has 32 bit longs, but the syscall interface is 64 bit. 1939 * We need to explicitly cast to an unsigned 64 bit type to avoid implicit 1940 * sign extension. We can't cast pointers directly because those are 1941 * 32 bits, and gcc will dump ugly warnings about casting from a pointer 1942 * to an integer of a different size. 1943 */ 1944 #undef LSS_SYSCALL_ARG 1945 #define LSS_SYSCALL_ARG(a) ((uint64_t)(uintptr_t)(a)) 1946 #undef _LSS_RETURN 1947 #define _LSS_RETURN(type, res, cast) \ 1948 do { \ 1949 if ((uint64_t)(res) >= (uint64_t)(-4095)) { \ 1950 LSS_ERRNO = -(res); \ 1951 res = -1; \ 1952 } \ 1953 return (type)(cast)(res); \ 1954 } while (0) 1955 #undef LSS_RETURN 1956 #define LSS_RETURN(type, res) _LSS_RETURN(type, res, uintptr_t) 1957 1958 #undef _LSS_BODY 1959 #define _LSS_BODY(nr, type, name, cast, ...) \ 1960 long long __res; \ 1961 __asm__ __volatile__(LSS_BODY_ASM##nr LSS_ENTRYPOINT \ 1962 : "=a" (__res) \ 1963 : "0" (__NR_##name) LSS_BODY_ARG##nr(__VA_ARGS__) \ 1964 : LSS_BODY_CLOBBER##nr "r11", "rcx", "memory"); \ 1965 _LSS_RETURN(type, __res, cast) 1966 #undef LSS_BODY 1967 #define LSS_BODY(nr, type, name, args...) 
\ 1968 _LSS_BODY(nr, type, name, uintptr_t, ## args) 1969 1970 #undef LSS_BODY_ASM0 1971 #undef LSS_BODY_ASM1 1972 #undef LSS_BODY_ASM2 1973 #undef LSS_BODY_ASM3 1974 #undef LSS_BODY_ASM4 1975 #undef LSS_BODY_ASM5 1976 #undef LSS_BODY_ASM6 1977 #define LSS_BODY_ASM0 1978 #define LSS_BODY_ASM1 LSS_BODY_ASM0 1979 #define LSS_BODY_ASM2 LSS_BODY_ASM1 1980 #define LSS_BODY_ASM3 LSS_BODY_ASM2 1981 #define LSS_BODY_ASM4 LSS_BODY_ASM3 "movq %5,%%r10;" 1982 #define LSS_BODY_ASM5 LSS_BODY_ASM4 "movq %6,%%r8;" 1983 #define LSS_BODY_ASM6 LSS_BODY_ASM5 "movq %7,%%r9;" 1984 1985 #undef LSS_BODY_CLOBBER0 1986 #undef LSS_BODY_CLOBBER1 1987 #undef LSS_BODY_CLOBBER2 1988 #undef LSS_BODY_CLOBBER3 1989 #undef LSS_BODY_CLOBBER4 1990 #undef LSS_BODY_CLOBBER5 1991 #undef LSS_BODY_CLOBBER6 1992 #define LSS_BODY_CLOBBER0 1993 #define LSS_BODY_CLOBBER1 LSS_BODY_CLOBBER0 1994 #define LSS_BODY_CLOBBER2 LSS_BODY_CLOBBER1 1995 #define LSS_BODY_CLOBBER3 LSS_BODY_CLOBBER2 1996 #define LSS_BODY_CLOBBER4 LSS_BODY_CLOBBER3 "r10", 1997 #define LSS_BODY_CLOBBER5 LSS_BODY_CLOBBER4 "r8", 1998 #define LSS_BODY_CLOBBER6 LSS_BODY_CLOBBER5 "r9", 1999 2000 #undef LSS_BODY_ARG0 2001 #undef LSS_BODY_ARG1 2002 #undef LSS_BODY_ARG2 2003 #undef LSS_BODY_ARG3 2004 #undef LSS_BODY_ARG4 2005 #undef LSS_BODY_ARG5 2006 #undef LSS_BODY_ARG6 2007 #define LSS_BODY_ARG0() 2008 #define LSS_BODY_ARG1(arg1) \ 2009 LSS_BODY_ARG0(), "D" (arg1) 2010 #define LSS_BODY_ARG2(arg1, arg2) \ 2011 LSS_BODY_ARG1(arg1), "S" (arg2) 2012 #define LSS_BODY_ARG3(arg1, arg2, arg3) \ 2013 LSS_BODY_ARG2(arg1, arg2), "d" (arg3) 2014 #define LSS_BODY_ARG4(arg1, arg2, arg3, arg4) \ 2015 LSS_BODY_ARG3(arg1, arg2, arg3), "r" (arg4) 2016 #define LSS_BODY_ARG5(arg1, arg2, arg3, arg4, arg5) \ 2017 LSS_BODY_ARG4(arg1, arg2, arg3, arg4), "r" (arg5) 2018 #define LSS_BODY_ARG6(arg1, arg2, arg3, arg4, arg5, arg6) \ 2019 LSS_BODY_ARG5(arg1, arg2, arg3, arg4, arg5), "r" (arg6) 2020 2021 #undef _syscall0 2022 #define _syscall0(type,name) \ 2023 type LSS_NAME(name)(void) { \ 2024 LSS_BODY(0, type, name); \ 2025 } 2026 #undef _syscall1 2027 #define _syscall1(type,name,type1,arg1) \ 2028 type LSS_NAME(name)(type1 arg1) { \ 2029 LSS_BODY(1, type, name, LSS_SYSCALL_ARG(arg1)); \ 2030 } 2031 #undef _syscall2 2032 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 2033 type LSS_NAME(name)(type1 arg1, type2 arg2) { \ 2034 LSS_BODY(2, type, name, LSS_SYSCALL_ARG(arg1), LSS_SYSCALL_ARG(arg2));\ 2035 } 2036 #undef _syscall3 2037 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 2038 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \ 2039 LSS_BODY(3, type, name, LSS_SYSCALL_ARG(arg1), LSS_SYSCALL_ARG(arg2), \ 2040 LSS_SYSCALL_ARG(arg3)); \ 2041 } 2042 #undef _syscall4 2043 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 2044 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ 2045 LSS_BODY(4, type, name, LSS_SYSCALL_ARG(arg1), LSS_SYSCALL_ARG(arg2), \ 2046 LSS_SYSCALL_ARG(arg3), LSS_SYSCALL_ARG(arg4));\ 2047 } 2048 #undef _syscall5 2049 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 2050 type5,arg5) \ 2051 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 2052 type5 arg5) { \ 2053 LSS_BODY(5, type, name, LSS_SYSCALL_ARG(arg1), LSS_SYSCALL_ARG(arg2), \ 2054 LSS_SYSCALL_ARG(arg3), LSS_SYSCALL_ARG(arg4), \ 2055 LSS_SYSCALL_ARG(arg5)); \ 2056 } 2057 #undef _syscall6 2058 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 2059 type5,arg5,type6,arg6) \ 2060 type 
LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 2061 type5 arg5, type6 arg6) { \ 2062 LSS_BODY(6, type, name, LSS_SYSCALL_ARG(arg1), LSS_SYSCALL_ARG(arg2), \ 2063 LSS_SYSCALL_ARG(arg3), LSS_SYSCALL_ARG(arg4), \ 2064 LSS_SYSCALL_ARG(arg5), LSS_SYSCALL_ARG(arg6));\ 2065 } 2066 LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack, 2067 int flags, void *arg, int *parent_tidptr, 2068 void *newtls, int *child_tidptr) { 2069 long long __res; 2070 { 2071 __asm__ __volatile__(/* if (fn == NULL) 2072 * return -EINVAL; 2073 */ 2074 "testq %4,%4\n" 2075 "jz 1f\n" 2076 2077 /* if (child_stack == NULL) 2078 * return -EINVAL; 2079 */ 2080 "testq %5,%5\n" 2081 "jz 1f\n" 2082 2083 /* childstack -= 2*sizeof(void *); 2084 */ 2085 "subq $16,%5\n" 2086 2087 /* Push "arg" and "fn" onto the stack that will be 2088 * used by the child. 2089 */ 2090 "movq %7,8(%5)\n" 2091 "movq %4,0(%5)\n" 2092 2093 /* %rax = syscall(%rax = __NR_clone, 2094 * %rdi = flags, 2095 * %rsi = child_stack, 2096 * %rdx = parent_tidptr, 2097 * %r8 = new_tls, 2098 * %r10 = child_tidptr) 2099 */ 2100 "movq %2,%%rax\n" 2101 "movq %9,%%r8\n" 2102 "movq %10,%%r10\n" 2103 LSS_ENTRYPOINT 2104 2105 /* if (%rax != 0) 2106 * return; 2107 */ 2108 "testq %%rax,%%rax\n" 2109 "jnz 1f\n" 2110 2111 /* In the child. Terminate frame pointer chain. 2112 */ 2113 "xorq %%rbp,%%rbp\n" 2114 2115 /* Call "fn(arg)". 2116 */ 2117 "popq %%rax\n" 2118 "popq %%rdi\n" 2119 "call *%%rax\n" 2120 2121 /* Call _exit(%ebx). 2122 */ 2123 "movq %%rax,%%rdi\n" 2124 "movq %3,%%rax\n" 2125 LSS_ENTRYPOINT 2126 2127 /* Return to parent. 2128 */ 2129 "1:\n" 2130 : "=a" (__res) 2131 : "0"(-EINVAL), "i"(__NR_clone), "i"(__NR_exit), 2132 "r"(LSS_SYSCALL_ARG(fn)), 2133 "S"(LSS_SYSCALL_ARG(child_stack)), 2134 "D"(LSS_SYSCALL_ARG(flags)), 2135 "r"(LSS_SYSCALL_ARG(arg)), 2136 "d"(LSS_SYSCALL_ARG(parent_tidptr)), 2137 "r"(LSS_SYSCALL_ARG(newtls)), 2138 "r"(LSS_SYSCALL_ARG(child_tidptr)) 2139 : "rsp", "memory", "r8", "r10", "r11", "rcx"); 2140 } 2141 LSS_RETURN(int, __res); 2142 } 2143 LSS_INLINE _syscall2(int, arch_prctl, int, c, void *, a) 2144 2145 /* Need to make sure loff_t isn't truncated to 32-bits under x32. */ 2146 LSS_INLINE int LSS_NAME(fadvise64)(int fd, loff_t offset, loff_t len, 2147 int advice) { 2148 LSS_BODY(4, int, fadvise64, LSS_SYSCALL_ARG(fd), (uint64_t)(offset), 2149 (uint64_t)(len), LSS_SYSCALL_ARG(advice)); 2150 } 2151 2152 LSS_INLINE void (*LSS_NAME(restore_rt)(void))(void) { 2153 /* On x86-64, the kernel does not know how to return from 2154 * a signal handler. Instead, it relies on user space to provide a 2155 * restorer function that calls the rt_sigreturn() system call. 2156 * Unfortunately, we cannot just reference the glibc version of this 2157 * function, as glibc goes out of its way to make it inaccessible. 2158 */ 2159 long long res; 2160 __asm__ __volatile__("jmp 2f\n" 2161 ".align 16\n" 2162 "1:movq %1,%%rax\n" 2163 LSS_ENTRYPOINT 2164 "2:leaq 1b(%%rip),%0\n" 2165 : "=r" (res) 2166 : "i" (__NR_rt_sigreturn)); 2167 return (void (*)(void))(uintptr_t)res; 2168 } 2169 #elif defined(__ARM_ARCH_3__) 2170 /* Most definitions of _syscallX() neglect to mark "memory" as being 2171 * clobbered. This causes problems with compilers, that do a better job 2172 * at optimizing across __asm__ calls. 2173 * So, we just have to redefine all of the _syscallX() macros. 2174 */ 2175 #undef LSS_REG 2176 #define LSS_REG(r,a) register long __r##r __asm__("r"#r) = (long)a 2177 #undef LSS_BODY 2178 #define LSS_BODY(type,name,args...) 
\ 2179 register long __res_r0 __asm__("r0"); \ 2180 long __res; \ 2181 __asm__ __volatile__ (__syscall(name) \ 2182 : "=r"(__res_r0) : args : "lr", "memory"); \ 2183 __res = __res_r0; \ 2184 LSS_RETURN(type, __res) 2185 #undef _syscall0 2186 #define _syscall0(type, name) \ 2187 type LSS_NAME(name)(void) { \ 2188 LSS_BODY(type, name); \ 2189 } 2190 #undef _syscall1 2191 #define _syscall1(type, name, type1, arg1) \ 2192 type LSS_NAME(name)(type1 arg1) { \ 2193 LSS_REG(0, arg1); LSS_BODY(type, name, "r"(__r0)); \ 2194 } 2195 #undef _syscall2 2196 #define _syscall2(type, name, type1, arg1, type2, arg2) \ 2197 type LSS_NAME(name)(type1 arg1, type2 arg2) { \ 2198 LSS_REG(0, arg1); LSS_REG(1, arg2); \ 2199 LSS_BODY(type, name, "r"(__r0), "r"(__r1)); \ 2200 } 2201 #undef _syscall3 2202 #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ 2203 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \ 2204 LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ 2205 LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2)); \ 2206 } 2207 #undef _syscall4 2208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 2209 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ 2210 LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ 2211 LSS_REG(3, arg4); \ 2212 LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3)); \ 2213 } 2214 #undef _syscall5 2215 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 2216 type5,arg5) \ 2217 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 2218 type5 arg5) { \ 2219 LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ 2220 LSS_REG(3, arg4); LSS_REG(4, arg5); \ 2221 LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3), \ 2222 "r"(__r4)); \ 2223 } 2224 #undef _syscall6 2225 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 2226 type5,arg5,type6,arg6) \ 2227 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 2228 type5 arg5, type6 arg6) { \ 2229 LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ 2230 LSS_REG(3, arg4); LSS_REG(4, arg5); LSS_REG(5, arg6); \ 2231 LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3), \ 2232 "r"(__r4), "r"(__r5)); \ 2233 } 2234 LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack, 2235 int flags, void *arg, int *parent_tidptr, 2236 void *newtls, int *child_tidptr) { 2237 long __res; 2238 { 2239 register int __flags __asm__("r0") = flags; 2240 register void *__stack __asm__("r1") = child_stack; 2241 register void *__ptid __asm__("r2") = parent_tidptr; 2242 register void *__tls __asm__("r3") = newtls; 2243 register int *__ctid __asm__("r4") = child_tidptr; 2244 __asm__ __volatile__(/* if (fn == NULL || child_stack == NULL) 2245 * return -EINVAL; 2246 */ 2247 "cmp %2,#0\n" 2248 "cmpne %3,#0\n" 2249 "moveq %0,%1\n" 2250 "beq 1f\n" 2251 2252 /* Push "arg" and "fn" onto the stack that will be 2253 * used by the child. 2254 */ 2255 "str %5,[%3,#-4]!\n" 2256 "str %2,[%3,#-4]!\n" 2257 2258 /* %r0 = syscall(%r0 = flags, 2259 * %r1 = child_stack, 2260 * %r2 = parent_tidptr, 2261 * %r3 = newtls, 2262 * %r4 = child_tidptr) 2263 */ 2264 __syscall(clone)"\n" 2265 2266 /* if (%r0 != 0) 2267 * return %r0; 2268 */ 2269 "movs %0,r0\n" 2270 "bne 1f\n" 2271 2272 /* In the child, now. Call "fn(arg)". 2273 */ 2274 "ldr r0,[sp, #4]\n" 2275 "mov lr,pc\n" 2276 "ldr pc,[sp]\n" 2277 2278 /* Call _exit(%r0). 
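 * The value that fn(arg) returned is still in r0 at this
 * point, so it becomes the child's exit status.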
2279 */ 2280 __syscall(exit)"\n" 2281 "1:\n" 2282 : "=r" (__res) 2283 : "i"(-EINVAL), 2284 "r"(fn), "r"(__stack), "r"(__flags), "r"(arg), 2285 "r"(__ptid), "r"(__tls), "r"(__ctid) 2286 : "cc", "lr", "memory"); 2287 } 2288 LSS_RETURN(int, __res); 2289 } 2290 #elif defined(__ARM_EABI__) 2291 /* Most definitions of _syscallX() neglect to mark "memory" as being 2292 * clobbered. This causes problems with compilers, that do a better job 2293 * at optimizing across __asm__ calls. 2294 * So, we just have to redefine all fo the _syscallX() macros. 2295 */ 2296 #undef LSS_REG 2297 #define LSS_REG(r,a) register long __r##r __asm__("r"#r) = (long)a 2298 #undef LSS_BODY 2299 #define LSS_BODY(type,name,args...) \ 2300 register long __res_r0 __asm__("r0"); \ 2301 long __res; \ 2302 __asm__ __volatile__ ("push {r7}\n" \ 2303 "mov r7, %1\n" \ 2304 "swi 0x0\n" \ 2305 "pop {r7}\n" \ 2306 : "=r"(__res_r0) \ 2307 : "i"(__NR_##name) , ## args \ 2308 : "lr", "memory"); \ 2309 __res = __res_r0; \ 2310 LSS_RETURN(type, __res) 2311 #undef _syscall0 2312 #define _syscall0(type, name) \ 2313 type LSS_NAME(name)(void) { \ 2314 LSS_BODY(type, name); \ 2315 } 2316 #undef _syscall1 2317 #define _syscall1(type, name, type1, arg1) \ 2318 type LSS_NAME(name)(type1 arg1) { \ 2319 LSS_REG(0, arg1); LSS_BODY(type, name, "r"(__r0)); \ 2320 } 2321 #undef _syscall2 2322 #define _syscall2(type, name, type1, arg1, type2, arg2) \ 2323 type LSS_NAME(name)(type1 arg1, type2 arg2) { \ 2324 LSS_REG(0, arg1); LSS_REG(1, arg2); \ 2325 LSS_BODY(type, name, "r"(__r0), "r"(__r1)); \ 2326 } 2327 #undef _syscall3 2328 #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ 2329 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \ 2330 LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ 2331 LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2)); \ 2332 } 2333 #undef _syscall4 2334 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 2335 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ 2336 LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ 2337 LSS_REG(3, arg4); \ 2338 LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3)); \ 2339 } 2340 #undef _syscall5 2341 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 2342 type5,arg5) \ 2343 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 2344 type5 arg5) { \ 2345 LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ 2346 LSS_REG(3, arg4); LSS_REG(4, arg5); \ 2347 LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3), \ 2348 "r"(__r4)); \ 2349 } 2350 #undef _syscall6 2351 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 2352 type5,arg5,type6,arg6) \ 2353 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 2354 type5 arg5, type6 arg6) { \ 2355 LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ 2356 LSS_REG(3, arg4); LSS_REG(4, arg5); LSS_REG(5, arg6); \ 2357 LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3), \ 2358 "r"(__r4), "r"(__r5)); \ 2359 } 2360 LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack, 2361 int flags, void *arg, int *parent_tidptr, 2362 void *newtls, int *child_tidptr) { 2363 long __res; 2364 { 2365 register int __flags __asm__("r0") = flags; 2366 register void *__stack __asm__("r1") = child_stack; 2367 register void *__ptid __asm__("r2") = parent_tidptr; 2368 register void *__tls __asm__("r3") = newtls; 2369 register int *__ctid __asm__("r4") = child_tidptr; 2370 
__asm__ __volatile__(/* if (fn == NULL || child_stack == NULL) 2371 * return -EINVAL; 2372 */ 2373 #ifdef __thumb2__ 2374 "push {r7}\n" 2375 #endif 2376 "cmp %2,#0\n" 2377 "it ne\n" 2378 "cmpne %3,#0\n" 2379 "it eq\n" 2380 "moveq %0,%1\n" 2381 "beq 1f\n" 2382 2383 /* Push "arg" and "fn" onto the stack that will be 2384 * used by the child. 2385 */ 2386 "str %5,[%3,#-4]!\n" 2387 "str %2,[%3,#-4]!\n" 2388 2389 /* %r0 = syscall(%r0 = flags, 2390 * %r1 = child_stack, 2391 * %r2 = parent_tidptr, 2392 * %r3 = newtls, 2393 * %r4 = child_tidptr) 2394 */ 2395 "mov r7, %9\n" 2396 "swi 0x0\n" 2397 2398 /* if (%r0 != 0) 2399 * return %r0; 2400 */ 2401 "movs %0,r0\n" 2402 "bne 1f\n" 2403 2404 /* In the child, now. Call "fn(arg)". 2405 */ 2406 "ldr r0,[sp, #4]\n" 2407 2408 /* When compiling for Thumb-2 the "MOV LR,PC" here 2409 * won't work because it loads PC+4 into LR, 2410 * whereas the LDR is a 4-byte instruction. 2411 * This results in the child thread always 2412 * crashing with an "Illegal Instruction" when it 2413 * returned into the middle of the LDR instruction 2414 * The instruction sequence used instead was 2415 * recommended by 2416 * "https://wiki.edubuntu.org/ARM/Thumb2PortingHowto#Quick_Reference". 2417 */ 2418 #ifdef __thumb2__ 2419 "ldr r7,[sp]\n" 2420 "blx r7\n" 2421 #else 2422 "mov lr,pc\n" 2423 "ldr pc,[sp]\n" 2424 #endif 2425 2426 /* Call _exit(%r0). 2427 */ 2428 "mov r7, %10\n" 2429 "swi 0x0\n" 2430 "1:\n" 2431 #ifdef __thumb2__ 2432 "pop {r7}" 2433 #endif 2434 : "=r" (__res) 2435 : "i"(-EINVAL), 2436 "r"(fn), "r"(__stack), "r"(__flags), "r"(arg), 2437 "r"(__ptid), "r"(__tls), "r"(__ctid), 2438 "i"(__NR_clone), "i"(__NR_exit) 2439 #ifdef __thumb2__ 2440 : "cc", "lr", "memory"); 2441 #else 2442 : "cc", "r7", "lr", "memory"); 2443 #endif 2444 } 2445 LSS_RETURN(int, __res); 2446 } 2447 #elif defined(__aarch64__) 2448 /* Most definitions of _syscallX() neglect to mark "memory" as being 2449 * clobbered. This causes problems with compilers, that do a better job 2450 * at optimizing across __asm__ calls. 2451 * So, we just have to redefine all of the _syscallX() macros. 2452 */ 2453 #undef LSS_REG 2454 #define LSS_REG(r,a) register int64_t __r##r __asm__("x"#r) = (int64_t)a 2455 #undef LSS_BODY 2456 #define LSS_BODY(type,name,args...) 
\ 2457 register int64_t __res_x0 __asm__("x0"); \ 2458 int64_t __res; \ 2459 __asm__ __volatile__ ("mov x8, %1\n" \ 2460 "svc 0x0\n" \ 2461 : "=r"(__res_x0) \ 2462 : "i"(__NR_##name) , ## args \ 2463 : "x8", "memory"); \ 2464 __res = __res_x0; \ 2465 LSS_RETURN(type, __res) 2466 #undef _syscall0 2467 #define _syscall0(type, name) \ 2468 type LSS_NAME(name)(void) { \ 2469 LSS_BODY(type, name); \ 2470 } 2471 #undef _syscall1 2472 #define _syscall1(type, name, type1, arg1) \ 2473 type LSS_NAME(name)(type1 arg1) { \ 2474 LSS_REG(0, arg1); LSS_BODY(type, name, "r"(__r0)); \ 2475 } 2476 #undef _syscall2 2477 #define _syscall2(type, name, type1, arg1, type2, arg2) \ 2478 type LSS_NAME(name)(type1 arg1, type2 arg2) { \ 2479 LSS_REG(0, arg1); LSS_REG(1, arg2); \ 2480 LSS_BODY(type, name, "r"(__r0), "r"(__r1)); \ 2481 } 2482 #undef _syscall3 2483 #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ 2484 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \ 2485 LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ 2486 LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2)); \ 2487 } 2488 #undef _syscall4 2489 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 2490 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ 2491 LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ 2492 LSS_REG(3, arg4); \ 2493 LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3)); \ 2494 } 2495 #undef _syscall5 2496 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 2497 type5,arg5) \ 2498 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 2499 type5 arg5) { \ 2500 LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ 2501 LSS_REG(3, arg4); LSS_REG(4, arg5); \ 2502 LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3), \ 2503 "r"(__r4)); \ 2504 } 2505 #undef _syscall6 2506 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 2507 type5,arg5,type6,arg6) \ 2508 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 2509 type5 arg5, type6 arg6) { \ 2510 LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ 2511 LSS_REG(3, arg4); LSS_REG(4, arg5); LSS_REG(5, arg6); \ 2512 LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3), \ 2513 "r"(__r4), "r"(__r5)); \ 2514 } 2515 2516 LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack, 2517 int flags, void *arg, int *parent_tidptr, 2518 void *newtls, int *child_tidptr) { 2519 int64_t __res; 2520 { 2521 register uint64_t __flags __asm__("x0") = flags; 2522 register void *__stack __asm__("x1") = child_stack; 2523 register void *__ptid __asm__("x2") = parent_tidptr; 2524 register void *__tls __asm__("x3") = newtls; 2525 register int *__ctid __asm__("x4") = child_tidptr; 2526 __asm__ __volatile__(/* Push "arg" and "fn" onto the stack that will be 2527 * used by the child. 2528 */ 2529 "stp %1, %4, [%2, #-16]!\n" 2530 2531 /* %x0 = syscall(%x0 = flags, 2532 * %x1 = child_stack, 2533 * %x2 = parent_tidptr, 2534 * %x3 = newtls, 2535 * %x4 = child_tidptr) 2536 */ 2537 "mov x8, %8\n" 2538 "svc 0x0\n" 2539 2540 /* if (%r0 != 0) 2541 * return %r0; 2542 */ 2543 "mov %0, x0\n" 2544 "cbnz x0, 1f\n" 2545 2546 /* In the child, now. Call "fn(arg)". 2547 */ 2548 "ldp x1, x0, [sp], #16\n" 2549 "blr x1\n" 2550 2551 /* Call _exit(%r0). 
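 * The value that fn(arg) returned is still in x0 at this
 * point, so it becomes the child's exit status.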
2552 */ 2553 "mov x8, %9\n" 2554 "svc 0x0\n" 2555 "1:\n" 2556 : "=r" (__res) 2557 : "r"(fn), "r"(__stack), "r"(__flags), "r"(arg), 2558 "r"(__ptid), "r"(__tls), "r"(__ctid), 2559 "i"(__NR_clone), "i"(__NR_exit) 2560 : "cc", "x8", "memory"); 2561 } 2562 LSS_RETURN(int, __res); 2563 } 2564 #elif defined(__mips__) 2565 #undef LSS_REG 2566 #define LSS_REG(r,a) register unsigned long __r##r __asm__("$"#r) = \ 2567 (unsigned long)(a) 2568 #undef LSS_BODY 2569 #undef LSS_SYSCALL_CLOBBERS 2570 #if _MIPS_SIM == _MIPS_SIM_ABI32 2571 #define LSS_SYSCALL_CLOBBERS "$1", "$3", "$8", "$9", "$10", \ 2572 "$11", "$12", "$13", "$14", "$15", \ 2573 "$24", "$25", "hi", "lo", "memory" 2574 #else 2575 #define LSS_SYSCALL_CLOBBERS "$1", "$3", "$10", "$11", "$12", \ 2576 "$13", "$14", "$15", "$24", "$25", \ 2577 "hi", "lo", "memory" 2578 #endif 2579 #define LSS_BODY(type,name,r7,...) \ 2580 register unsigned long __v0 __asm__("$2") = __NR_##name; \ 2581 __asm__ __volatile__ ("syscall\n" \ 2582 : "=r"(__v0), r7 (__r7) \ 2583 : "0"(__v0), ##__VA_ARGS__ \ 2584 : LSS_SYSCALL_CLOBBERS); \ 2585 LSS_RETURN(type, __v0, __r7) 2586 #undef _syscall0 2587 #define _syscall0(type, name) \ 2588 type LSS_NAME(name)(void) { \ 2589 register unsigned long __r7 __asm__("$7"); \ 2590 LSS_BODY(type, name, "=r"); \ 2591 } 2592 #undef _syscall1 2593 #define _syscall1(type, name, type1, arg1) \ 2594 type LSS_NAME(name)(type1 arg1) { \ 2595 register unsigned long __r7 __asm__("$7"); \ 2596 LSS_REG(4, arg1); LSS_BODY(type, name, "=r", "r"(__r4)); \ 2597 } 2598 #undef _syscall2 2599 #define _syscall2(type, name, type1, arg1, type2, arg2) \ 2600 type LSS_NAME(name)(type1 arg1, type2 arg2) { \ 2601 register unsigned long __r7 __asm__("$7"); \ 2602 LSS_REG(4, arg1); LSS_REG(5, arg2); \ 2603 LSS_BODY(type, name, "=r", "r"(__r4), "r"(__r5)); \ 2604 } 2605 #undef _syscall3 2606 #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ 2607 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \ 2608 register unsigned long __r7 __asm__("$7"); \ 2609 LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \ 2610 LSS_BODY(type, name, "=r", "r"(__r4), "r"(__r5), "r"(__r6)); \ 2611 } 2612 #undef _syscall4 2613 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 2614 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ 2615 LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \ 2616 LSS_REG(7, arg4); \ 2617 LSS_BODY(type, name, "+r", "r"(__r4), "r"(__r5), "r"(__r6)); \ 2618 } 2619 #undef _syscall5 2620 #if _MIPS_SIM == _MIPS_SIM_ABI32 2621 /* The old 32bit MIPS system call API passes the fifth and sixth argument 2622 * on the stack, whereas the new APIs use registers "r8" and "r9". 
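 * (Under o32 the bottom 16 bytes of the caller's argument area are
 * reserved for the register arguments $4..$7, which is why the macros
 * below make room on the stack and store the fifth argument at 16($29),
 * and the sixth at 20($29), before issuing the syscall.)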
2623 */ 2624 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 2625 type5,arg5) \ 2626 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 2627 type5 arg5) { \ 2628 LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \ 2629 LSS_REG(7, arg4); \ 2630 register unsigned long __v0 __asm__("$2") = __NR_##name; \ 2631 __asm__ __volatile__ (".set noreorder\n" \ 2632 "subu $29, 32\n" \ 2633 "sw %5, 16($29)\n" \ 2634 "syscall\n" \ 2635 "addiu $29, 32\n" \ 2636 ".set reorder\n" \ 2637 : "+r"(__v0), "+r" (__r7) \ 2638 : "r"(__r4), "r"(__r5), \ 2639 "r"(__r6), "r" ((unsigned long)arg5) \ 2640 : "$8", "$9", "$10", "$11", "$12", \ 2641 "$13", "$14", "$15", "$24", "$25", \ 2642 "memory"); \ 2643 LSS_RETURN(type, __v0, __r7); \ 2644 } 2645 #else 2646 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 2647 type5,arg5) \ 2648 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 2649 type5 arg5) { \ 2650 LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \ 2651 LSS_REG(7, arg4); LSS_REG(8, arg5); \ 2652 LSS_BODY(type, name, "+r", "r"(__r4), "r"(__r5), "r"(__r6), \ 2653 "r"(__r8)); \ 2654 } 2655 #endif 2656 #undef _syscall6 2657 #if _MIPS_SIM == _MIPS_SIM_ABI32 2658 /* The old 32bit MIPS system call API passes the fifth and sixth argument 2659 * on the stack, whereas the new APIs use registers "r8" and "r9". 2660 */ 2661 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 2662 type5,arg5,type6,arg6) \ 2663 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 2664 type5 arg5, type6 arg6) { \ 2665 LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \ 2666 LSS_REG(7, arg4); \ 2667 register unsigned long __v0 __asm__("$2") = __NR_##name; \ 2668 __asm__ __volatile__ (".set noreorder\n" \ 2669 "subu $29, 32\n" \ 2670 "sw %5, 16($29)\n" \ 2671 "sw %6, 20($29)\n" \ 2672 "syscall\n" \ 2673 "addiu $29, 32\n" \ 2674 ".set reorder\n" \ 2675 : "+r"(__v0), "+r" (__r7) \ 2676 : "r"(__r4), "r"(__r5), \ 2677 "r"(__r6), "r" ((unsigned long)arg5), \ 2678 "r" ((unsigned long)arg6) \ 2679 : "$8", "$9", "$10", "$11", "$12", \ 2680 "$13", "$14", "$15", "$24", "$25", \ 2681 "memory"); \ 2682 LSS_RETURN(type, __v0, __r7); \ 2683 } 2684 #else 2685 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 2686 type5,arg5,type6,arg6) \ 2687 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 2688 type5 arg5,type6 arg6) { \ 2689 LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \ 2690 LSS_REG(7, arg4); LSS_REG(8, arg5); LSS_REG(9, arg6); \ 2691 LSS_BODY(type, name, "+r", "r"(__r4), "r"(__r5), "r"(__r6), \ 2692 "r"(__r8), "r"(__r9)); \ 2693 } 2694 #endif 2695 LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack, 2696 int flags, void *arg, int *parent_tidptr, 2697 void *newtls, int *child_tidptr) { 2698 register unsigned long __v0 __asm__("$2"); 2699 register unsigned long __r7 __asm__("$7") = (unsigned long)newtls; 2700 { 2701 register int __flags __asm__("$4") = flags; 2702 register void *__stack __asm__("$5") = child_stack; 2703 register void *__ptid __asm__("$6") = parent_tidptr; 2704 register int *__ctid __asm__("$8") = child_tidptr; 2705 __asm__ __volatile__( 2706 #if _MIPS_SIM == _MIPS_SIM_ABI32 && _MIPS_SZPTR == 32 2707 "subu $29,24\n" 2708 #elif _MIPS_SIM == _MIPS_SIM_NABI32 2709 "sub $29,16\n" 2710 #else 2711 "dsubu $29,16\n" 2712 #endif 2713 2714 /* if (fn == NULL || child_stack == NULL) 2715 * return -EINVAL; 2716 */ 2717 "li %0,%2\n" 2718 "beqz 
%5,1f\n" 2719 "beqz %6,1f\n" 2720 2721 /* Push "arg" and "fn" onto the stack that will be 2722 * used by the child. 2723 */ 2724 #if _MIPS_SIM == _MIPS_SIM_ABI32 && _MIPS_SZPTR == 32 2725 "subu %6,32\n" 2726 "sw %5,0(%6)\n" 2727 "sw %8,4(%6)\n" 2728 #elif _MIPS_SIM == _MIPS_SIM_NABI32 2729 "sub %6,32\n" 2730 "sw %5,0(%6)\n" 2731 "sw %8,8(%6)\n" 2732 #else 2733 "dsubu %6,32\n" 2734 "sd %5,0(%6)\n" 2735 "sd %8,8(%6)\n" 2736 #endif 2737 2738 /* $7 = syscall($4 = flags, 2739 * $5 = child_stack, 2740 * $6 = parent_tidptr, 2741 * $7 = newtls, 2742 * $8 = child_tidptr) 2743 */ 2744 "li $2,%3\n" 2745 "syscall\n" 2746 2747 /* if ($7 != 0) 2748 * return $2; 2749 */ 2750 "bnez $7,1f\n" 2751 "bnez $2,1f\n" 2752 2753 /* In the child, now. Call "fn(arg)". 2754 */ 2755 #if _MIPS_SIM == _MIPS_SIM_ABI32 && _MIPS_SZPTR == 32 2756 "lw $25,0($29)\n" 2757 "lw $4,4($29)\n" 2758 #elif _MIPS_SIM == _MIPS_SIM_NABI32 2759 "lw $25,0($29)\n" 2760 "lw $4,8($29)\n" 2761 #else 2762 "ld $25,0($29)\n" 2763 "ld $4,8($29)\n" 2764 #endif 2765 "jalr $25\n" 2766 2767 /* Call _exit($2) 2768 */ 2769 "move $4,$2\n" 2770 "li $2,%4\n" 2771 "syscall\n" 2772 2773 "1:\n" 2774 #if _MIPS_SIM == _MIPS_SIM_ABI32 && _MIPS_SZPTR == 32 2775 "addu $29, 24\n" 2776 #elif _MIPS_SIM == _MIPS_SIM_NABI32 2777 "add $29, 16\n" 2778 #else 2779 "daddu $29,16\n" 2780 #endif 2781 : "+r" (__v0), "+r" (__r7) 2782 : "i"(-EINVAL), "i"(__NR_clone), "i"(__NR_exit), 2783 "r"(fn), "r"(__stack), "r"(__flags), "r"(arg), 2784 "r"(__ptid), "r"(__r7), "r"(__ctid) 2785 : "$9", "$10", "$11", "$12", "$13", "$14", "$15", 2786 "$24", "$25", "memory"); 2787 } 2788 LSS_RETURN(int, __v0, __r7); 2789 } 2790 #elif defined (__PPC__) 2791 #undef LSS_LOADARGS_0 2792 #define LSS_LOADARGS_0(name, dummy...) \ 2793 __sc_0 = __NR_##name 2794 #undef LSS_LOADARGS_1 2795 #define LSS_LOADARGS_1(name, arg1) \ 2796 LSS_LOADARGS_0(name); \ 2797 __sc_3 = (unsigned long) (arg1) 2798 #undef LSS_LOADARGS_2 2799 #define LSS_LOADARGS_2(name, arg1, arg2) \ 2800 LSS_LOADARGS_1(name, arg1); \ 2801 __sc_4 = (unsigned long) (arg2) 2802 #undef LSS_LOADARGS_3 2803 #define LSS_LOADARGS_3(name, arg1, arg2, arg3) \ 2804 LSS_LOADARGS_2(name, arg1, arg2); \ 2805 __sc_5 = (unsigned long) (arg3) 2806 #undef LSS_LOADARGS_4 2807 #define LSS_LOADARGS_4(name, arg1, arg2, arg3, arg4) \ 2808 LSS_LOADARGS_3(name, arg1, arg2, arg3); \ 2809 __sc_6 = (unsigned long) (arg4) 2810 #undef LSS_LOADARGS_5 2811 #define LSS_LOADARGS_5(name, arg1, arg2, arg3, arg4, arg5) \ 2812 LSS_LOADARGS_4(name, arg1, arg2, arg3, arg4); \ 2813 __sc_7 = (unsigned long) (arg5) 2814 #undef LSS_LOADARGS_6 2815 #define LSS_LOADARGS_6(name, arg1, arg2, arg3, arg4, arg5, arg6) \ 2816 LSS_LOADARGS_5(name, arg1, arg2, arg3, arg4, arg5); \ 2817 __sc_8 = (unsigned long) (arg6) 2818 #undef LSS_ASMINPUT_0 2819 #define LSS_ASMINPUT_0 "0" (__sc_0) 2820 #undef LSS_ASMINPUT_1 2821 #define LSS_ASMINPUT_1 LSS_ASMINPUT_0, "1" (__sc_3) 2822 #undef LSS_ASMINPUT_2 2823 #define LSS_ASMINPUT_2 LSS_ASMINPUT_1, "2" (__sc_4) 2824 #undef LSS_ASMINPUT_3 2825 #define LSS_ASMINPUT_3 LSS_ASMINPUT_2, "3" (__sc_5) 2826 #undef LSS_ASMINPUT_4 2827 #define LSS_ASMINPUT_4 LSS_ASMINPUT_3, "4" (__sc_6) 2828 #undef LSS_ASMINPUT_5 2829 #define LSS_ASMINPUT_5 LSS_ASMINPUT_4, "5" (__sc_7) 2830 #undef LSS_ASMINPUT_6 2831 #define LSS_ASMINPUT_6 LSS_ASMINPUT_5, "6" (__sc_8) 2832 #undef LSS_BODY 2833 #define LSS_BODY(nr, type, name, args...) 
\ 2834 long __sc_ret, __sc_err; \ 2835 { \ 2836 register unsigned long __sc_0 __asm__ ("r0"); \ 2837 register unsigned long __sc_3 __asm__ ("r3"); \ 2838 register unsigned long __sc_4 __asm__ ("r4"); \ 2839 register unsigned long __sc_5 __asm__ ("r5"); \ 2840 register unsigned long __sc_6 __asm__ ("r6"); \ 2841 register unsigned long __sc_7 __asm__ ("r7"); \ 2842 register unsigned long __sc_8 __asm__ ("r8"); \ 2843 \ 2844 LSS_LOADARGS_##nr(name, args); \ 2845 __asm__ __volatile__ \ 2846 ("sc\n\t" \ 2847 "mfcr %0" \ 2848 : "=&r" (__sc_0), \ 2849 "=&r" (__sc_3), "=&r" (__sc_4), \ 2850 "=&r" (__sc_5), "=&r" (__sc_6), \ 2851 "=&r" (__sc_7), "=&r" (__sc_8) \ 2852 : LSS_ASMINPUT_##nr \ 2853 : "cr0", "ctr", "memory", \ 2854 "r9", "r10", "r11", "r12"); \ 2855 __sc_ret = __sc_3; \ 2856 __sc_err = __sc_0; \ 2857 } \ 2858 LSS_RETURN(type, __sc_ret, __sc_err) 2859 #undef _syscall0 2860 #define _syscall0(type, name) \ 2861 type LSS_NAME(name)(void) { \ 2862 LSS_BODY(0, type, name); \ 2863 } 2864 #undef _syscall1 2865 #define _syscall1(type, name, type1, arg1) \ 2866 type LSS_NAME(name)(type1 arg1) { \ 2867 LSS_BODY(1, type, name, arg1); \ 2868 } 2869 #undef _syscall2 2870 #define _syscall2(type, name, type1, arg1, type2, arg2) \ 2871 type LSS_NAME(name)(type1 arg1, type2 arg2) { \ 2872 LSS_BODY(2, type, name, arg1, arg2); \ 2873 } 2874 #undef _syscall3 2875 #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ 2876 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \ 2877 LSS_BODY(3, type, name, arg1, arg2, arg3); \ 2878 } 2879 #undef _syscall4 2880 #define _syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \ 2881 type4, arg4) \ 2882 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ 2883 LSS_BODY(4, type, name, arg1, arg2, arg3, arg4); \ 2884 } 2885 #undef _syscall5 2886 #define _syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \ 2887 type4, arg4, type5, arg5) \ 2888 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 2889 type5 arg5) { \ 2890 LSS_BODY(5, type, name, arg1, arg2, arg3, arg4, arg5); \ 2891 } 2892 #undef _syscall6 2893 #define _syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \ 2894 type4, arg4, type5, arg5, type6, arg6) \ 2895 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 2896 type5 arg5, type6 arg6) { \ 2897 LSS_BODY(6, type, name, arg1, arg2, arg3, arg4, arg5, arg6); \ 2898 } 2899 /* clone function adapted from glibc 2.3.6 clone.S */ 2900 /* TODO(csilvers): consider wrapping some args up in a struct, like we 2901 * do for i386's _syscall6, so we can compile successfully on gcc 2.95 2902 */ 2903 LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack, 2904 int flags, void *arg, int *parent_tidptr, 2905 void *newtls, int *child_tidptr) { 2906 long __ret, __err; 2907 { 2908 register int (*__fn)(void *) __asm__ ("r8") = fn; 2909 register void *__cstack __asm__ ("r4") = child_stack; 2910 register int __flags __asm__ ("r3") = flags; 2911 register void * __arg __asm__ ("r9") = arg; 2912 register int * __ptidptr __asm__ ("r5") = parent_tidptr; 2913 register void * __newtls __asm__ ("r6") = newtls; 2914 register int * __ctidptr __asm__ ("r7") = child_tidptr; 2915 __asm__ __volatile__( 2916 /* check for fn == NULL 2917 * and child_stack == NULL 2918 */ 2919 "cmpwi cr0, %6, 0\n\t" 2920 "cmpwi cr1, %7, 0\n\t" 2921 "cror cr0*4+eq, cr1*4+eq, cr0*4+eq\n\t" 2922 "beq- cr0, 1f\n\t" 2923 2924 /* set up stack frame for child */ 2925 "clrrwi %7, %7, 4\n\t" 2926 "li 0, 0\n\t" 
2927 "stwu 0, -16(%7)\n\t" 2928 2929 /* fn, arg, child_stack are saved across the syscall: r28-30 */ 2930 "mr 28, %6\n\t" 2931 "mr 29, %7\n\t" 2932 "mr 27, %9\n\t" 2933 2934 /* syscall */ 2935 "li 0, %4\n\t" 2936 /* flags already in r3 2937 * child_stack already in r4 2938 * ptidptr already in r5 2939 * newtls already in r6 2940 * ctidptr already in r7 2941 */ 2942 "sc\n\t" 2943 2944 /* Test if syscall was successful */ 2945 "cmpwi cr1, 3, 0\n\t" 2946 "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t" 2947 "bne- cr1, 1f\n\t" 2948 2949 /* Do the function call */ 2950 "mtctr 28\n\t" 2951 "mr 3, 27\n\t" 2952 "bctrl\n\t" 2953 2954 /* Call _exit(r3) */ 2955 "li 0, %5\n\t" 2956 "sc\n\t" 2957 2958 /* Return to parent */ 2959 "1:\n" 2960 "mfcr %1\n\t" 2961 "mr %0, 3\n\t" 2962 : "=r" (__ret), "=r" (__err) 2963 : "0" (-1), "1" (EINVAL), 2964 "i" (__NR_clone), "i" (__NR_exit), 2965 "r" (__fn), "r" (__cstack), "r" (__flags), 2966 "r" (__arg), "r" (__ptidptr), "r" (__newtls), 2967 "r" (__ctidptr) 2968 : "cr0", "cr1", "memory", "ctr", 2969 "r0", "r29", "r27", "r28"); 2970 } 2971 LSS_RETURN(int, __ret, __err); 2972 } 2973 #endif 2974 #define __NR__exit __NR_exit 2975 #define __NR__gettid __NR_gettid 2976 #define __NR__mremap __NR_mremap 2977 LSS_INLINE _syscall1(void *, brk, void *, e) 2978 LSS_INLINE _syscall1(int, chdir, const char *,p) 2979 LSS_INLINE _syscall1(int, close, int, f) 2980 LSS_INLINE _syscall2(int, clock_getres, int, c, 2981 struct kernel_timespec*, t) 2982 LSS_INLINE _syscall2(int, clock_gettime, int, c, 2983 struct kernel_timespec*, t) 2984 LSS_INLINE _syscall1(int, dup, int, f) 2985 #if !defined(__aarch64__) 2986 // The dup2 syscall has been deprecated on aarch64. We polyfill it below. 2987 LSS_INLINE _syscall2(int, dup2, int, s, 2988 int, d) 2989 #endif 2990 LSS_INLINE _syscall3(int, execve, const char*, f, 2991 const char*const*,a,const char*const*, e) 2992 LSS_INLINE _syscall1(int, _exit, int, e) 2993 LSS_INLINE _syscall1(int, exit_group, int, e) 2994 LSS_INLINE _syscall3(int, fcntl, int, f, 2995 int, c, long, a) 2996 #if !defined(__aarch64__) 2997 // The fork syscall has been deprecated on aarch64. We polyfill it below. 2998 LSS_INLINE _syscall0(pid_t, fork) 2999 #endif 3000 LSS_INLINE _syscall2(int, fstat, int, f, 3001 struct kernel_stat*, b) 3002 LSS_INLINE _syscall2(int, fstatfs, int, f, 3003 struct kernel_statfs*, b) 3004 #if defined(__x86_64__) 3005 /* Need to make sure off_t isn't truncated to 32-bits under x32. */ 3006 LSS_INLINE int LSS_NAME(ftruncate)(int f, off_t l) { 3007 LSS_BODY(2, int, ftruncate, LSS_SYSCALL_ARG(f), (uint64_t)(l)); 3008 } 3009 #else 3010 LSS_INLINE _syscall2(int, ftruncate, int, f, 3011 off_t, l) 3012 #endif 3013 LSS_INLINE _syscall4(int, futex, int*, a, 3014 int, o, int, v, 3015 struct kernel_timespec*, t) 3016 LSS_INLINE _syscall3(int, getdents, int, f, 3017 struct kernel_dirent*, d, int, c) 3018 LSS_INLINE _syscall3(int, getdents64, int, f, 3019 struct kernel_dirent64*, d, int, c) 3020 LSS_INLINE _syscall0(gid_t, getegid) 3021 LSS_INLINE _syscall0(uid_t, geteuid) 3022 #if !defined(__aarch64__) 3023 // The getgprp syscall has been deprecated on aarch64. 
3024 LSS_INLINE _syscall0(pid_t, getpgrp) 3025 #endif 3026 LSS_INLINE _syscall0(pid_t, getpid) 3027 LSS_INLINE _syscall0(pid_t, getppid) 3028 LSS_INLINE _syscall2(int, getpriority, int, a, 3029 int, b) 3030 LSS_INLINE _syscall3(int, getresgid, gid_t *, r, 3031 gid_t *, e, gid_t *, s) 3032 LSS_INLINE _syscall3(int, getresuid, uid_t *, r, 3033 uid_t *, e, uid_t *, s) 3034 #if !defined(__ARM_EABI__) 3035 LSS_INLINE _syscall2(int, getrlimit, int, r, 3036 struct kernel_rlimit*, l) 3037 #endif 3038 LSS_INLINE _syscall1(pid_t, getsid, pid_t, p) 3039 LSS_INLINE _syscall0(pid_t, _gettid) 3040 LSS_INLINE _syscall2(pid_t, gettimeofday, struct kernel_timeval*, t, 3041 void*, tz) 3042 LSS_INLINE _syscall5(int, setxattr, const char *,p, 3043 const char *, n, const void *,v, 3044 size_t, s, int, f) 3045 LSS_INLINE _syscall5(int, lsetxattr, const char *,p, 3046 const char *, n, const void *,v, 3047 size_t, s, int, f) 3048 LSS_INLINE _syscall4(ssize_t, getxattr, const char *,p, 3049 const char *, n, void *, v, size_t, s) 3050 LSS_INLINE _syscall4(ssize_t, lgetxattr, const char *,p, 3051 const char *, n, void *, v, size_t, s) 3052 LSS_INLINE _syscall3(ssize_t, listxattr, const char *,p, 3053 char *, l, size_t, s) 3054 LSS_INLINE _syscall3(ssize_t, llistxattr, const char *,p, 3055 char *, l, size_t, s) 3056 LSS_INLINE _syscall3(int, ioctl, int, d, 3057 int, r, void *, a) 3058 LSS_INLINE _syscall2(int, ioprio_get, int, which, 3059 int, who) 3060 LSS_INLINE _syscall3(int, ioprio_set, int, which, 3061 int, who, int, ioprio) 3062 LSS_INLINE _syscall2(int, kill, pid_t, p, 3063 int, s) 3064 #if defined(__x86_64__) 3065 /* Need to make sure off_t isn't truncated to 32-bits under x32. */ 3066 LSS_INLINE off_t LSS_NAME(lseek)(int f, off_t o, int w) { 3067 _LSS_BODY(3, off_t, lseek, off_t, LSS_SYSCALL_ARG(f), (uint64_t)(o), 3068 LSS_SYSCALL_ARG(w)); 3069 } 3070 #else 3071 LSS_INLINE _syscall3(off_t, lseek, int, f, 3072 off_t, o, int, w) 3073 #endif 3074 LSS_INLINE _syscall2(int, munmap, void*, s, 3075 size_t, l) 3076 LSS_INLINE _syscall6(long, move_pages, pid_t, p, 3077 unsigned long, n, void **,g, int *, d, 3078 int *, s, int, f) 3079 LSS_INLINE _syscall3(int, mprotect, const void *,a, 3080 size_t, l, int, p) 3081 LSS_INLINE _syscall5(void*, _mremap, void*, o, 3082 size_t, os, size_t, ns, 3083 unsigned long, f, void *, a) 3084 #if !defined(__aarch64__) 3085 // The open and poll syscalls have been deprecated on aarch64. We polyfill 3086 // them below. 3087 LSS_INLINE _syscall3(int, open, const char*, p, 3088 int, f, int, m) 3089 LSS_INLINE _syscall3(int, poll, struct kernel_pollfd*, u, 3090 unsigned int, n, int, t) 3091 #endif 3092 LSS_INLINE _syscall5(int, prctl, int, option, 3093 unsigned long, arg2, 3094 unsigned long, arg3, 3095 unsigned long, arg4, 3096 unsigned long, arg5) 3097 LSS_INLINE _syscall4(long, ptrace, int, r, 3098 pid_t, p, void *, a, void *, d) 3099 #if defined(__NR_quotactl) 3100 // Defined on x86_64 / i386 only 3101 LSS_INLINE _syscall4(int, quotactl, int, cmd, const char *, special, 3102 int, id, caddr_t, addr) 3103 #endif 3104 LSS_INLINE _syscall3(ssize_t, read, int, f, 3105 void *, b, size_t, c) 3106 #if !defined(__aarch64__) 3107 // The readlink syscall has been deprecated on aarch64. We polyfill below. 
3108 LSS_INLINE _syscall3(int, readlink, const char*, p, 3109 char*, b, size_t, s) 3110 #endif 3111 LSS_INLINE _syscall4(int, rt_sigaction, int, s, 3112 const struct kernel_sigaction*, a, 3113 struct kernel_sigaction*, o, size_t, c) 3114 LSS_INLINE _syscall2(int, rt_sigpending, struct kernel_sigset_t *, s, 3115 size_t, c) 3116 LSS_INLINE _syscall4(int, rt_sigprocmask, int, h, 3117 const struct kernel_sigset_t*, s, 3118 struct kernel_sigset_t*, o, size_t, c) 3119 LSS_INLINE _syscall2(int, rt_sigsuspend, 3120 const struct kernel_sigset_t*, s, size_t, c) 3121 LSS_INLINE _syscall3(int, sched_getaffinity,pid_t, p, 3122 unsigned int, l, unsigned long *, m) 3123 LSS_INLINE _syscall3(int, sched_setaffinity,pid_t, p, 3124 unsigned int, l, unsigned long *, m) 3125 LSS_INLINE _syscall0(int, sched_yield) 3126 LSS_INLINE _syscall1(long, set_tid_address, int *, t) 3127 LSS_INLINE _syscall1(int, setfsgid, gid_t, g) 3128 LSS_INLINE _syscall1(int, setfsuid, uid_t, u) 3129 LSS_INLINE _syscall1(int, setuid, uid_t, u) 3130 LSS_INLINE _syscall1(int, setgid, gid_t, g) 3131 LSS_INLINE _syscall2(int, setpgid, pid_t, p, 3132 pid_t, g) 3133 LSS_INLINE _syscall3(int, setpriority, int, a, 3134 int, b, int, p) 3135 LSS_INLINE _syscall3(int, setresgid, gid_t, r, 3136 gid_t, e, gid_t, s) 3137 LSS_INLINE _syscall3(int, setresuid, uid_t, r, 3138 uid_t, e, uid_t, s) 3139 LSS_INLINE _syscall2(int, setrlimit, int, r, 3140 const struct kernel_rlimit*, l) 3141 LSS_INLINE _syscall0(pid_t, setsid) 3142 LSS_INLINE _syscall2(int, sigaltstack, const stack_t*, s, 3143 const stack_t*, o) 3144 #if defined(__NR_sigreturn) 3145 LSS_INLINE _syscall1(int, sigreturn, unsigned long, u) 3146 #endif 3147 #if !defined(__aarch64__) 3148 // The stat syscall has been deprecated on aarch64. We polyfill it below. 3149 LSS_INLINE _syscall2(int, stat, const char*, f, 3150 struct kernel_stat*, b) 3151 #endif 3152 LSS_INLINE _syscall2(int, statfs, const char*, f, 3153 struct kernel_statfs*, b) 3154 LSS_INLINE _syscall3(int, tgkill, pid_t, p, 3155 pid_t, t, int, s) 3156 LSS_INLINE _syscall2(int, tkill, pid_t, p, 3157 int, s) 3158 #if !defined(__aarch64__) 3159 // The unlink syscall has been deprecated on aarch64. We polyfill it below. 3160 LSS_INLINE _syscall1(int, unlink, const char*, f) 3161 #endif 3162 LSS_INLINE _syscall3(ssize_t, write, int, f, 3163 const void *, b, size_t, c) 3164 LSS_INLINE _syscall3(ssize_t, writev, int, f, 3165 const struct kernel_iovec*, v, size_t, c) 3166 #if defined(__NR_getcpu) 3167 LSS_INLINE _syscall3(long, getcpu, unsigned *, cpu, 3168 unsigned *, node, void *, unused) 3169 #endif 3170 #if defined(__x86_64__) || \ 3171 (defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI32) 3172 LSS_INLINE _syscall3(int, recvmsg, int, s, 3173 struct kernel_msghdr*, m, int, f) 3174 LSS_INLINE _syscall3(int, sendmsg, int, s, 3175 const struct kernel_msghdr*, m, int, f) 3176 LSS_INLINE _syscall6(int, sendto, int, s, 3177 const void*, m, size_t, l, 3178 int, f, 3179 const struct kernel_sockaddr*, a, int, t) 3180 LSS_INLINE _syscall2(int, shutdown, int, s, 3181 int, h) 3182 LSS_INLINE _syscall3(int, socket, int, d, 3183 int, t, int, p) 3184 LSS_INLINE _syscall4(int, socketpair, int, d, 3185 int, t, int, p, int*, s) 3186 #endif 3187 #if defined(__x86_64__) 3188 /* Need to make sure loff_t isn't truncated to 32-bits under x32. 
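 * (Illustrative only: a call such as
 *    sys_fallocate(fd, 0, (loff_t)4 << 30, (loff_t)1 << 30);
 * must keep the high halves of both 64-bit arguments, which is why they
 * are passed as uint64_t rather than through LSS_SYSCALL_ARG().)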
*/ 3189 LSS_INLINE int LSS_NAME(fallocate)(int f, int mode, loff_t offset, 3190 loff_t len) { 3191 LSS_BODY(4, int, fallocate, LSS_SYSCALL_ARG(f), LSS_SYSCALL_ARG(mode), 3192 (uint64_t)(offset), (uint64_t)(len)); 3193 } 3194 3195 LSS_INLINE int LSS_NAME(getresgid32)(gid_t *rgid, 3196 gid_t *egid, 3197 gid_t *sgid) { 3198 return LSS_NAME(getresgid)(rgid, egid, sgid); 3199 } 3200 3201 LSS_INLINE int LSS_NAME(getresuid32)(uid_t *ruid, 3202 uid_t *euid, 3203 uid_t *suid) { 3204 return LSS_NAME(getresuid)(ruid, euid, suid); 3205 } 3206 3207 /* Need to make sure __off64_t isn't truncated to 32-bits under x32. */ 3208 LSS_INLINE void* LSS_NAME(mmap)(void *s, size_t l, int p, int f, int d, 3209 int64_t o) { 3210 LSS_BODY(6, void*, mmap, LSS_SYSCALL_ARG(s), LSS_SYSCALL_ARG(l), 3211 LSS_SYSCALL_ARG(p), LSS_SYSCALL_ARG(f), 3212 LSS_SYSCALL_ARG(d), (uint64_t)(o)); 3213 } 3214 3215 LSS_INLINE _syscall4(int, newfstatat, int, d, 3216 const char *, p, 3217 struct kernel_stat*, b, int, f) 3218 3219 LSS_INLINE int LSS_NAME(setfsgid32)(gid_t gid) { 3220 return LSS_NAME(setfsgid)(gid); 3221 } 3222 3223 LSS_INLINE int LSS_NAME(setfsuid32)(uid_t uid) { 3224 return LSS_NAME(setfsuid)(uid); 3225 } 3226 3227 LSS_INLINE int LSS_NAME(setresgid32)(gid_t rgid, gid_t egid, gid_t sgid) { 3228 return LSS_NAME(setresgid)(rgid, egid, sgid); 3229 } 3230 3231 LSS_INLINE int LSS_NAME(setresuid32)(uid_t ruid, uid_t euid, uid_t suid) { 3232 return LSS_NAME(setresuid)(ruid, euid, suid); 3233 } 3234 3235 LSS_INLINE int LSS_NAME(sigaction)(int signum, 3236 const struct kernel_sigaction *act, 3237 struct kernel_sigaction *oldact) { 3238 /* On x86_64, the kernel requires us to always set our own 3239 * SA_RESTORER in order to be able to return from a signal handler. 3240 * This function must have a "magic" signature that the "gdb" 3241 * (and maybe the kernel?) can recognize. 
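 * Callers never have to provide a restorer themselves; a minimal,
 * purely illustrative use of this wrapper looks like:
 *
 *   struct kernel_sigaction sa;
 *   memset(&sa, 0, sizeof(sa));
 *   sa.sa_handler_ = my_handler;          // hypothetical handler
 *   sys_sigemptyset(&sa.sa_mask);
 *   sys_sigaction(SIGTERM, &sa, NULL);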
3242 */ 3243 if (act != NULL && !(act->sa_flags & SA_RESTORER)) { 3244 struct kernel_sigaction a = *act; 3245 a.sa_flags |= SA_RESTORER; 3246 a.sa_restorer = LSS_NAME(restore_rt)(); 3247 return LSS_NAME(rt_sigaction)(signum, &a, oldact, 3248 (KERNEL_NSIG+7)/8); 3249 } else { 3250 return LSS_NAME(rt_sigaction)(signum, act, oldact, 3251 (KERNEL_NSIG+7)/8); 3252 } 3253 } 3254 3255 LSS_INLINE int LSS_NAME(sigpending)(struct kernel_sigset_t *set) { 3256 return LSS_NAME(rt_sigpending)(set, (KERNEL_NSIG+7)/8); 3257 } 3258 3259 LSS_INLINE int LSS_NAME(sigprocmask)(int how, 3260 const struct kernel_sigset_t *set, 3261 struct kernel_sigset_t *oldset) { 3262 return LSS_NAME(rt_sigprocmask)(how, set, oldset, (KERNEL_NSIG+7)/8); 3263 } 3264 3265 LSS_INLINE int LSS_NAME(sigsuspend)(const struct kernel_sigset_t *set) { 3266 return LSS_NAME(rt_sigsuspend)(set, (KERNEL_NSIG+7)/8); 3267 } 3268 #endif 3269 #if defined(__x86_64__) || defined(__ARM_ARCH_3__) || \ 3270 defined(__ARM_EABI__) || defined(__aarch64__) || \ 3271 (defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI32) 3272 LSS_INLINE _syscall4(pid_t, wait4, pid_t, p, 3273 int*, s, int, o, 3274 struct kernel_rusage*, r) 3275 3276 LSS_INLINE pid_t LSS_NAME(waitpid)(pid_t pid, int *status, int options){ 3277 return LSS_NAME(wait4)(pid, status, options, 0); 3278 } 3279 #endif 3280 #if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) 3281 LSS_INLINE _syscall4(int, openat, int, d, const char *, p, int, f, int, m) 3282 LSS_INLINE _syscall3(int, unlinkat, int, d, const char *, p, int, f) 3283 #endif 3284 #if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) 3285 #define __NR__getresgid32 __NR_getresgid32 3286 #define __NR__getresuid32 __NR_getresuid32 3287 #define __NR__setfsgid32 __NR_setfsgid32 3288 #define __NR__setfsuid32 __NR_setfsuid32 3289 #define __NR__setresgid32 __NR_setresgid32 3290 #define __NR__setresuid32 __NR_setresuid32 3291 #if defined(__ARM_EABI__) 3292 LSS_INLINE _syscall2(int, ugetrlimit, int, r, 3293 struct kernel_rlimit*, l) 3294 #endif 3295 LSS_INLINE _syscall3(int, _getresgid32, gid_t *, r, 3296 gid_t *, e, gid_t *, s) 3297 LSS_INLINE _syscall3(int, _getresuid32, uid_t *, r, 3298 uid_t *, e, uid_t *, s) 3299 LSS_INLINE _syscall1(int, _setfsgid32, gid_t, f) 3300 LSS_INLINE _syscall1(int, _setfsuid32, uid_t, f) 3301 LSS_INLINE _syscall3(int, _setresgid32, gid_t, r, 3302 gid_t, e, gid_t, s) 3303 LSS_INLINE _syscall3(int, _setresuid32, uid_t, r, 3304 uid_t, e, uid_t, s) 3305 3306 LSS_INLINE int LSS_NAME(getresgid32)(gid_t *rgid, 3307 gid_t *egid, 3308 gid_t *sgid) { 3309 int rc; 3310 if ((rc = LSS_NAME(_getresgid32)(rgid, egid, sgid)) < 0 && 3311 LSS_ERRNO == ENOSYS) { 3312 if ((rgid == NULL) || (egid == NULL) || (sgid == NULL)) { 3313 return EFAULT; 3314 } 3315 // Clear the high bits first, since getresgid only sets 16 bits 3316 *rgid = *egid = *sgid = 0; 3317 rc = LSS_NAME(getresgid)(rgid, egid, sgid); 3318 } 3319 return rc; 3320 } 3321 3322 LSS_INLINE int LSS_NAME(getresuid32)(uid_t *ruid, 3323 uid_t *euid, 3324 uid_t *suid) { 3325 int rc; 3326 if ((rc = LSS_NAME(_getresuid32)(ruid, euid, suid)) < 0 && 3327 LSS_ERRNO == ENOSYS) { 3328 if ((ruid == NULL) || (euid == NULL) || (suid == NULL)) { 3329 return EFAULT; 3330 } 3331 // Clear the high bits first, since getresuid only sets 16 bits 3332 *ruid = *euid = *suid = 0; 3333 rc = LSS_NAME(getresuid)(ruid, euid, suid); 3334 } 3335 return rc; 3336 } 3337 3338 LSS_INLINE int LSS_NAME(setfsgid32)(gid_t gid) { 3339 int rc; 3340 if ((rc = 
LSS_NAME(_setfsgid32)(gid)) < 0 && 3341 LSS_ERRNO == ENOSYS) { 3342 if ((unsigned int)gid & ~0xFFFFu) { 3343 rc = EINVAL; 3344 } else { 3345 rc = LSS_NAME(setfsgid)(gid); 3346 } 3347 } 3348 return rc; 3349 } 3350 3351 LSS_INLINE int LSS_NAME(setfsuid32)(uid_t uid) { 3352 int rc; 3353 if ((rc = LSS_NAME(_setfsuid32)(uid)) < 0 && 3354 LSS_ERRNO == ENOSYS) { 3355 if ((unsigned int)uid & ~0xFFFFu) { 3356 rc = EINVAL; 3357 } else { 3358 rc = LSS_NAME(setfsuid)(uid); 3359 } 3360 } 3361 return rc; 3362 } 3363 3364 LSS_INLINE int LSS_NAME(setresgid32)(gid_t rgid, gid_t egid, gid_t sgid) { 3365 int rc; 3366 if ((rc = LSS_NAME(_setresgid32)(rgid, egid, sgid)) < 0 && 3367 LSS_ERRNO == ENOSYS) { 3368 if ((unsigned int)rgid & ~0xFFFFu || 3369 (unsigned int)egid & ~0xFFFFu || 3370 (unsigned int)sgid & ~0xFFFFu) { 3371 rc = EINVAL; 3372 } else { 3373 rc = LSS_NAME(setresgid)(rgid, egid, sgid); 3374 } 3375 } 3376 return rc; 3377 } 3378 3379 LSS_INLINE int LSS_NAME(setresuid32)(uid_t ruid, uid_t euid, uid_t suid) { 3380 int rc; 3381 if ((rc = LSS_NAME(_setresuid32)(ruid, euid, suid)) < 0 && 3382 LSS_ERRNO == ENOSYS) { 3383 if ((unsigned int)ruid & ~0xFFFFu || 3384 (unsigned int)euid & ~0xFFFFu || 3385 (unsigned int)suid & ~0xFFFFu) { 3386 rc = EINVAL; 3387 } else { 3388 rc = LSS_NAME(setresuid)(ruid, euid, suid); 3389 } 3390 } 3391 return rc; 3392 } 3393 #endif 3394 LSS_INLINE int LSS_NAME(sigemptyset)(struct kernel_sigset_t *set) { 3395 memset(&set->sig, 0, sizeof(set->sig)); 3396 return 0; 3397 } 3398 3399 LSS_INLINE int LSS_NAME(sigfillset)(struct kernel_sigset_t *set) { 3400 memset(&set->sig, -1, sizeof(set->sig)); 3401 return 0; 3402 } 3403 3404 LSS_INLINE int LSS_NAME(sigaddset)(struct kernel_sigset_t *set, 3405 int signum) { 3406 if (signum < 1 || signum > (int)(8*sizeof(set->sig))) { 3407 LSS_ERRNO = EINVAL; 3408 return -1; 3409 } else { 3410 set->sig[(signum - 1)/(8*sizeof(set->sig[0]))] 3411 |= 1UL << ((signum - 1) % (8*sizeof(set->sig[0]))); 3412 return 0; 3413 } 3414 } 3415 3416 LSS_INLINE int LSS_NAME(sigdelset)(struct kernel_sigset_t *set, 3417 int signum) { 3418 if (signum < 1 || signum > (int)(8*sizeof(set->sig))) { 3419 LSS_ERRNO = EINVAL; 3420 return -1; 3421 } else { 3422 set->sig[(signum - 1)/(8*sizeof(set->sig[0]))] 3423 &= ~(1UL << ((signum - 1) % (8*sizeof(set->sig[0])))); 3424 return 0; 3425 } 3426 } 3427 3428 LSS_INLINE int LSS_NAME(sigismember)(struct kernel_sigset_t *set, 3429 int signum) { 3430 if (signum < 1 || signum > (int)(8*sizeof(set->sig))) { 3431 LSS_ERRNO = EINVAL; 3432 return -1; 3433 } else { 3434 return !!(set->sig[(signum - 1)/(8*sizeof(set->sig[0]))] & 3435 (1UL << ((signum - 1) % (8*sizeof(set->sig[0]))))); 3436 } 3437 } 3438 #if defined(__i386__) || defined(__ARM_ARCH_3__) || \ 3439 defined(__ARM_EABI__) || \ 3440 (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || defined(__PPC__) 3441 #define __NR__sigaction __NR_sigaction 3442 #define __NR__sigpending __NR_sigpending 3443 #define __NR__sigprocmask __NR_sigprocmask 3444 #define __NR__sigsuspend __NR_sigsuspend 3445 #define __NR__socketcall __NR_socketcall 3446 LSS_INLINE _syscall2(int, fstat64, int, f, 3447 struct kernel_stat64 *, b) 3448 LSS_INLINE _syscall5(int, _llseek, uint, fd, 3449 unsigned long, hi, unsigned long, lo, 3450 loff_t *, res, uint, wh) 3451 #if !defined(__ARM_EABI__) 3452 LSS_INLINE _syscall1(void*, mmap, void*, a) 3453 #endif 3454 LSS_INLINE _syscall6(void*, mmap2, void*, s, 3455 size_t, l, int, p, 3456 int, f, int, d, 3457 off_t, o) 3458 LSS_INLINE _syscall3(int, _sigaction, int, s, 3459 
const struct kernel_old_sigaction*, a, 3460 struct kernel_old_sigaction*, o) 3461 LSS_INLINE _syscall1(int, _sigpending, unsigned long*, s) 3462 LSS_INLINE _syscall3(int, _sigprocmask, int, h, 3463 const unsigned long*, s, 3464 unsigned long*, o) 3465 #ifdef __PPC__ 3466 LSS_INLINE _syscall1(int, _sigsuspend, unsigned long, s) 3467 #else 3468 LSS_INLINE _syscall3(int, _sigsuspend, const void*, a, 3469 int, b, 3470 unsigned long, s) 3471 #endif 3472 LSS_INLINE _syscall2(int, stat64, const char *, p, 3473 struct kernel_stat64 *, b) 3474 3475 LSS_INLINE int LSS_NAME(sigaction)(int signum, 3476 const struct kernel_sigaction *act, 3477 struct kernel_sigaction *oldact) { 3478 int old_errno = LSS_ERRNO; 3479 int rc; 3480 struct kernel_sigaction a; 3481 if (act != NULL) { 3482 a = *act; 3483 #ifdef __i386__ 3484 /* On i386, the kernel requires us to always set our own 3485 * SA_RESTORER when using realtime signals. Otherwise, it does not 3486 * know how to return from a signal handler. This function must have 3487 * a "magic" signature that the "gdb" (and maybe the kernel?) can 3488 * recognize. 3489 * Apparently, a SA_RESTORER is implicitly set by the kernel, when 3490 * using non-realtime signals. 3491 * 3492 * TODO: Test whether ARM needs a restorer 3493 */ 3494 if (!(a.sa_flags & SA_RESTORER)) { 3495 a.sa_flags |= SA_RESTORER; 3496 a.sa_restorer = (a.sa_flags & SA_SIGINFO) 3497 ? LSS_NAME(restore_rt)() : LSS_NAME(restore)(); 3498 } 3499 #endif 3500 } 3501 rc = LSS_NAME(rt_sigaction)(signum, act ? &a : act, oldact, 3502 (KERNEL_NSIG+7)/8); 3503 if (rc < 0 && LSS_ERRNO == ENOSYS) { 3504 struct kernel_old_sigaction oa, ooa, *ptr_a = &oa, *ptr_oa = &ooa; 3505 if (!act) { 3506 ptr_a = NULL; 3507 } else { 3508 oa.sa_handler_ = act->sa_handler_; 3509 memcpy(&oa.sa_mask, &act->sa_mask, sizeof(oa.sa_mask)); 3510 #ifndef __mips__ 3511 oa.sa_restorer = act->sa_restorer; 3512 #endif 3513 oa.sa_flags = act->sa_flags; 3514 } 3515 if (!oldact) { 3516 ptr_oa = NULL; 3517 } 3518 LSS_ERRNO = old_errno; 3519 rc = LSS_NAME(_sigaction)(signum, ptr_a, ptr_oa); 3520 if (rc == 0 && oldact) { 3521 if (act) { 3522 memcpy(oldact, act, sizeof(*act)); 3523 } else { 3524 memset(oldact, 0, sizeof(*oldact)); 3525 } 3526 oldact->sa_handler_ = ptr_oa->sa_handler_; 3527 oldact->sa_flags = ptr_oa->sa_flags; 3528 memcpy(&oldact->sa_mask, &ptr_oa->sa_mask, sizeof(ptr_oa->sa_mask)); 3529 #ifndef __mips__ 3530 oldact->sa_restorer = ptr_oa->sa_restorer; 3531 #endif 3532 } 3533 } 3534 return rc; 3535 } 3536 3537 LSS_INLINE int LSS_NAME(sigpending)(struct kernel_sigset_t *set) { 3538 int old_errno = LSS_ERRNO; 3539 int rc = LSS_NAME(rt_sigpending)(set, (KERNEL_NSIG+7)/8); 3540 if (rc < 0 && LSS_ERRNO == ENOSYS) { 3541 LSS_ERRNO = old_errno; 3542 LSS_NAME(sigemptyset)(set); 3543 rc = LSS_NAME(_sigpending)(&set->sig[0]); 3544 } 3545 return rc; 3546 } 3547 3548 LSS_INLINE int LSS_NAME(sigprocmask)(int how, 3549 const struct kernel_sigset_t *set, 3550 struct kernel_sigset_t *oldset) { 3551 int olderrno = LSS_ERRNO; 3552 int rc = LSS_NAME(rt_sigprocmask)(how, set, oldset, (KERNEL_NSIG+7)/8); 3553 if (rc < 0 && LSS_ERRNO == ENOSYS) { 3554 LSS_ERRNO = olderrno; 3555 if (oldset) { 3556 LSS_NAME(sigemptyset)(oldset); 3557 } 3558 rc = LSS_NAME(_sigprocmask)(how, 3559 set ? &set->sig[0] : NULL, 3560 oldset ? 
&oldset->sig[0] : NULL); 3561 } 3562 return rc; 3563 } 3564 3565 LSS_INLINE int LSS_NAME(sigsuspend)(const struct kernel_sigset_t *set) { 3566 int olderrno = LSS_ERRNO; 3567 int rc = LSS_NAME(rt_sigsuspend)(set, (KERNEL_NSIG+7)/8); 3568 if (rc < 0 && LSS_ERRNO == ENOSYS) { 3569 LSS_ERRNO = olderrno; 3570 rc = LSS_NAME(_sigsuspend)( 3571 #ifndef __PPC__ 3572 set, 0, 3573 #endif 3574 set->sig[0]); 3575 } 3576 return rc; 3577 } 3578 #endif 3579 #if defined(__PPC__) 3580 #undef LSS_SC_LOADARGS_0 3581 #define LSS_SC_LOADARGS_0(dummy...) 3582 #undef LSS_SC_LOADARGS_1 3583 #define LSS_SC_LOADARGS_1(arg1) \ 3584 __sc_4 = (unsigned long) (arg1) 3585 #undef LSS_SC_LOADARGS_2 3586 #define LSS_SC_LOADARGS_2(arg1, arg2) \ 3587 LSS_SC_LOADARGS_1(arg1); \ 3588 __sc_5 = (unsigned long) (arg2) 3589 #undef LSS_SC_LOADARGS_3 3590 #define LSS_SC_LOADARGS_3(arg1, arg2, arg3) \ 3591 LSS_SC_LOADARGS_2(arg1, arg2); \ 3592 __sc_6 = (unsigned long) (arg3) 3593 #undef LSS_SC_LOADARGS_4 3594 #define LSS_SC_LOADARGS_4(arg1, arg2, arg3, arg4) \ 3595 LSS_SC_LOADARGS_3(arg1, arg2, arg3); \ 3596 __sc_7 = (unsigned long) (arg4) 3597 #undef LSS_SC_LOADARGS_5 3598 #define LSS_SC_LOADARGS_5(arg1, arg2, arg3, arg4, arg5) \ 3599 LSS_SC_LOADARGS_4(arg1, arg2, arg3, arg4); \ 3600 __sc_8 = (unsigned long) (arg5) 3601 #undef LSS_SC_BODY 3602 #define LSS_SC_BODY(nr, type, opt, args...) \ 3603 long __sc_ret, __sc_err; \ 3604 { \ 3605 register unsigned long __sc_0 __asm__ ("r0") = __NR_socketcall; \ 3606 register unsigned long __sc_3 __asm__ ("r3") = opt; \ 3607 register unsigned long __sc_4 __asm__ ("r4"); \ 3608 register unsigned long __sc_5 __asm__ ("r5"); \ 3609 register unsigned long __sc_6 __asm__ ("r6"); \ 3610 register unsigned long __sc_7 __asm__ ("r7"); \ 3611 register unsigned long __sc_8 __asm__ ("r8"); \ 3612 LSS_SC_LOADARGS_##nr(args); \ 3613 __asm__ __volatile__ \ 3614 ("stwu 1, -48(1)\n\t" \ 3615 "stw 4, 20(1)\n\t" \ 3616 "stw 5, 24(1)\n\t" \ 3617 "stw 6, 28(1)\n\t" \ 3618 "stw 7, 32(1)\n\t" \ 3619 "stw 8, 36(1)\n\t" \ 3620 "addi 4, 1, 20\n\t" \ 3621 "sc\n\t" \ 3622 "mfcr %0" \ 3623 : "=&r" (__sc_0), \ 3624 "=&r" (__sc_3), "=&r" (__sc_4), \ 3625 "=&r" (__sc_5), "=&r" (__sc_6), \ 3626 "=&r" (__sc_7), "=&r" (__sc_8) \ 3627 : LSS_ASMINPUT_##nr \ 3628 : "cr0", "ctr", "memory"); \ 3629 __sc_ret = __sc_3; \ 3630 __sc_err = __sc_0; \ 3631 } \ 3632 LSS_RETURN(type, __sc_ret, __sc_err) 3633 3634 LSS_INLINE ssize_t LSS_NAME(recvmsg)(int s,struct kernel_msghdr *msg, 3635 int flags){ 3636 LSS_SC_BODY(3, ssize_t, 17, s, msg, flags); 3637 } 3638 3639 LSS_INLINE ssize_t LSS_NAME(sendmsg)(int s, 3640 const struct kernel_msghdr *msg, 3641 int flags) { 3642 LSS_SC_BODY(3, ssize_t, 16, s, msg, flags); 3643 } 3644 3645 // TODO(csilvers): why is this ifdef'ed out? 
#if defined(__PPC__)
#undef LSS_SC_LOADARGS_0
#define LSS_SC_LOADARGS_0(dummy...)
#undef LSS_SC_LOADARGS_1
#define LSS_SC_LOADARGS_1(arg1) \
    __sc_4 = (unsigned long) (arg1)
#undef LSS_SC_LOADARGS_2
#define LSS_SC_LOADARGS_2(arg1, arg2) \
    LSS_SC_LOADARGS_1(arg1); \
    __sc_5 = (unsigned long) (arg2)
#undef LSS_SC_LOADARGS_3
#define LSS_SC_LOADARGS_3(arg1, arg2, arg3) \
    LSS_SC_LOADARGS_2(arg1, arg2); \
    __sc_6 = (unsigned long) (arg3)
#undef LSS_SC_LOADARGS_4
#define LSS_SC_LOADARGS_4(arg1, arg2, arg3, arg4) \
    LSS_SC_LOADARGS_3(arg1, arg2, arg3); \
    __sc_7 = (unsigned long) (arg4)
#undef LSS_SC_LOADARGS_5
#define LSS_SC_LOADARGS_5(arg1, arg2, arg3, arg4, arg5) \
    LSS_SC_LOADARGS_4(arg1, arg2, arg3, arg4); \
    __sc_8 = (unsigned long) (arg5)
#undef LSS_SC_BODY
#define LSS_SC_BODY(nr, type, opt, args...) \
    long __sc_ret, __sc_err; \
    { \
      register unsigned long __sc_0 __asm__ ("r0") = __NR_socketcall; \
      register unsigned long __sc_3 __asm__ ("r3") = opt; \
      register unsigned long __sc_4 __asm__ ("r4"); \
      register unsigned long __sc_5 __asm__ ("r5"); \
      register unsigned long __sc_6 __asm__ ("r6"); \
      register unsigned long __sc_7 __asm__ ("r7"); \
      register unsigned long __sc_8 __asm__ ("r8"); \
      LSS_SC_LOADARGS_##nr(args); \
      __asm__ __volatile__ \
          ("stwu 1, -48(1)\n\t" \
           "stw 4, 20(1)\n\t" \
           "stw 5, 24(1)\n\t" \
           "stw 6, 28(1)\n\t" \
           "stw 7, 32(1)\n\t" \
           "stw 8, 36(1)\n\t" \
           "addi 4, 1, 20\n\t" \
           "sc\n\t" \
           "mfcr %0" \
           : "=&r" (__sc_0), \
             "=&r" (__sc_3), "=&r" (__sc_4), \
             "=&r" (__sc_5), "=&r" (__sc_6), \
             "=&r" (__sc_7), "=&r" (__sc_8) \
           : LSS_ASMINPUT_##nr \
           : "cr0", "ctr", "memory"); \
      __sc_ret = __sc_3; \
      __sc_err = __sc_0; \
    } \
    LSS_RETURN(type, __sc_ret, __sc_err)

  LSS_INLINE ssize_t LSS_NAME(recvmsg)(int s, struct kernel_msghdr *msg,
                                       int flags) {
    LSS_SC_BODY(3, ssize_t, 17, s, msg, flags);
  }

  LSS_INLINE ssize_t LSS_NAME(sendmsg)(int s,
                                       const struct kernel_msghdr *msg,
                                       int flags) {
    LSS_SC_BODY(3, ssize_t, 16, s, msg, flags);
  }

  // TODO(csilvers): why is this ifdef'ed out?
#if 0
  LSS_INLINE ssize_t LSS_NAME(sendto)(int s, const void *buf, size_t len,
                                      int flags,
                                      const struct kernel_sockaddr *to,
                                      unsigned int tolen) {
    LSS_BODY(6, ssize_t, 11, s, buf, len, flags, to, tolen);
  }
#endif

  LSS_INLINE int LSS_NAME(shutdown)(int s, int how) {
    LSS_SC_BODY(2, int, 13, s, how);
  }

  LSS_INLINE int LSS_NAME(socket)(int domain, int type, int protocol) {
    LSS_SC_BODY(3, int, 1, domain, type, protocol);
  }

  LSS_INLINE int LSS_NAME(socketpair)(int d, int type, int protocol,
                                      int sv[2]) {
    LSS_SC_BODY(4, int, 8, d, type, protocol, sv);
  }
#endif
#if defined(__ARM_EABI__) || defined(__aarch64__)
  LSS_INLINE _syscall3(ssize_t, recvmsg, int, s, struct kernel_msghdr*, msg,
                       int, flags)
  LSS_INLINE _syscall3(ssize_t, sendmsg, int, s, const struct kernel_msghdr*,
                       msg, int, flags)
  LSS_INLINE _syscall6(ssize_t, sendto, int, s, const void*, buf, size_t, len,
                       int, flags, const struct kernel_sockaddr*, to,
                       unsigned int, tolen)
  LSS_INLINE _syscall2(int, shutdown, int, s, int, how)
  LSS_INLINE _syscall3(int, socket, int, domain, int, type, int, protocol)
  LSS_INLINE _syscall4(int, socketpair, int, d, int, type, int, protocol,
                       int*, sv)
#endif
#if defined(__i386__) || defined(__ARM_ARCH_3__) || \
    (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32)
#define __NR__socketcall __NR_socketcall
  LSS_INLINE _syscall2(int, _socketcall, int, c,
                       va_list, a)
  LSS_INLINE int LSS_NAME(socketcall)(int op, ...) {
    int rc;
    va_list ap;
    va_start(ap, op);
    rc = LSS_NAME(_socketcall)(op, ap);
    va_end(ap);
    return rc;
  }

  LSS_INLINE ssize_t LSS_NAME(recvmsg)(int s, struct kernel_msghdr *msg,
                                       int flags) {
    return (ssize_t)LSS_NAME(socketcall)(17, s, msg, flags);
  }

  LSS_INLINE ssize_t LSS_NAME(sendmsg)(int s,
                                       const struct kernel_msghdr *msg,
                                       int flags) {
    return (ssize_t)LSS_NAME(socketcall)(16, s, msg, flags);
  }

  LSS_INLINE ssize_t LSS_NAME(sendto)(int s, const void *buf, size_t len,
                                      int flags,
                                      const struct kernel_sockaddr *to,
                                      unsigned int tolen) {
    return (ssize_t)LSS_NAME(socketcall)(11, s, buf, len, flags, to, tolen);
  }

  LSS_INLINE int LSS_NAME(shutdown)(int s, int how) {
    return LSS_NAME(socketcall)(13, s, how);
  }

  LSS_INLINE int LSS_NAME(socket)(int domain, int type, int protocol) {
    return LSS_NAME(socketcall)(1, domain, type, protocol);
  }

  LSS_INLINE int LSS_NAME(socketpair)(int d, int type, int protocol,
                                      int sv[2]) {
    return LSS_NAME(socketcall)(8, d, type, protocol, sv);
  }
#endif
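/* Illustrative sketch (not part of this header's API): on the socketcall-
 * based architectures above, the numeric first argument is the SYS_*
 * operation code from <linux/net.h> (1 = SYS_SOCKET, 13 = SYS_SHUTDOWN,
 * 16 = SYS_SENDMSG, 17 = SYS_RECVMSG, ...). Callers never see this
 * multiplexing:
 *
 *   int fd = sys_socket(AF_INET, SOCK_STREAM, 0);   // socketcall op 1
 *   if (fd >= 0) {
 *     sys_shutdown(fd, SHUT_RDWR);                  // socketcall op 13
 *   }
 *
 * AF_INET, SOCK_STREAM, and SHUT_RDWR are ordinary <sys/socket.h> constants
 * and are not defined by this header.
 */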
#if defined(__i386__) || defined(__PPC__)
  LSS_INLINE _syscall4(int, fstatat64, int, d,
                       const char *, p,
                       struct kernel_stat64 *, b, int, f)
#endif
#if defined(__i386__) || defined(__PPC__) || \
    (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32)
  LSS_INLINE _syscall3(pid_t, waitpid, pid_t, p,
                       int*, s, int, o)
#endif
#if defined(__mips__)
  /* sys_pipe() on MIPS has non-standard calling conventions, as it returns
   * both file handles through CPU registers.
   */
  LSS_INLINE int LSS_NAME(pipe)(int *p) {
    register unsigned long __v0 __asm__("$2") = __NR_pipe;
    register unsigned long __v1 __asm__("$3");
    register unsigned long __r7 __asm__("$7");
    __asm__ __volatile__ ("syscall\n"
                          : "=r"(__v0), "=r"(__v1), "=r" (__r7)
                          : "0"(__v0)
                          : "$8", "$9", "$10", "$11", "$12",
                            "$13", "$14", "$15", "$24", "$25", "memory");
    if (__r7) {
      unsigned long __errnovalue = __v0;
      LSS_ERRNO = __errnovalue;
      return -1;
    } else {
      p[0] = __v0;
      p[1] = __v1;
      return 0;
    }
  }
#elif !defined(__aarch64__)
  // The pipe syscall has been deprecated on aarch64. We polyfill it below.
  LSS_INLINE _syscall1(int, pipe, int *, p)
#endif
  /* TODO(csilvers): see if ppc can/should support this as well */
#if defined(__i386__) || defined(__ARM_ARCH_3__) || \
    defined(__ARM_EABI__) || \
    (defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI64)
#define __NR__statfs64 __NR_statfs64
#define __NR__fstatfs64 __NR_fstatfs64
  LSS_INLINE _syscall3(int, _statfs64, const char*, p,
                       size_t, s, struct kernel_statfs64*, b)
  LSS_INLINE _syscall3(int, _fstatfs64, int, f,
                       size_t, s, struct kernel_statfs64*, b)
  LSS_INLINE int LSS_NAME(statfs64)(const char *p,
                                    struct kernel_statfs64 *b) {
    return LSS_NAME(_statfs64)(p, sizeof(*b), b);
  }
  LSS_INLINE int LSS_NAME(fstatfs64)(int f, struct kernel_statfs64 *b) {
    return LSS_NAME(_fstatfs64)(f, sizeof(*b), b);
  }
#endif

  LSS_INLINE int LSS_NAME(execv)(const char *path, const char *const argv[]) {
    extern char **environ;
    return LSS_NAME(execve)(path, argv, (const char *const *)environ);
  }

  LSS_INLINE pid_t LSS_NAME(gettid)(void) {
    pid_t tid = LSS_NAME(_gettid)();
    if (tid != -1) {
      return tid;
    }
    return LSS_NAME(getpid)();
  }

  LSS_INLINE void *LSS_NAME(mremap)(void *old_address, size_t old_size,
                                    size_t new_size, int flags, ...) {
    va_list ap;
    void *new_address, *rc;
    va_start(ap, flags);
    new_address = va_arg(ap, void *);
    rc = LSS_NAME(_mremap)(old_address, old_size, new_size,
                           flags, new_address);
    va_end(ap);
    return rc;
  }
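  /* Illustrative sketch (not part of this header's API): the mremap wrapper
   * above always forwards a fifth argument to the raw syscall, reading it
   * from the varargs list; the kernel only consults it when MREMAP_FIXED is
   * set. MREMAP_MAYMOVE and MREMAP_FIXED come from <linux/mman.h>, not from
   * this header:
   *
   *   // "p" is an existing 4096-byte mapping.
   *   void *q = sys_mremap(p, 4096, 8192, MREMAP_MAYMOVE);
   *   // With MREMAP_FIXED, the target address is passed as a fifth argument:
   *   // sys_mremap(p, 4096, 8192, MREMAP_MAYMOVE | MREMAP_FIXED, target);
   */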
  LSS_INLINE int LSS_NAME(ptrace_detach)(pid_t pid) {
    /* PTRACE_DETACH can sometimes forget to wake up the tracee and it
     * then sends job control signals to the real parent, rather than to
     * the tracer. We reduce the risk of this happening by starting a
     * whole new time slice, and then quickly sending a SIGCONT signal
     * right after detaching from the tracee.
     *
     * We use tkill to ensure that we only issue a wakeup for the thread being
     * detached. Large multi-threaded apps can take a long time in the kernel
     * processing SIGCONT.
     */
    int rc, err;
    LSS_NAME(sched_yield)();
    rc = LSS_NAME(ptrace)(PTRACE_DETACH, pid, (void *)0, (void *)0);
    err = LSS_ERRNO;
    LSS_NAME(tkill)(pid, SIGCONT);
    /* Old systems don't have tkill */
    if (LSS_ERRNO == ENOSYS)
      LSS_NAME(kill)(pid, SIGCONT);
    LSS_ERRNO = err;
    return rc;
  }

  LSS_INLINE int LSS_NAME(raise)(int sig) {
    return LSS_NAME(kill)(LSS_NAME(getpid)(), sig);
  }

  LSS_INLINE int LSS_NAME(setpgrp)(void) {
    return LSS_NAME(setpgid)(0, 0);
  }

  LSS_INLINE int LSS_NAME(sysconf)(int name) {
    extern int __getpagesize(void);
    switch (name) {
      case _SC_OPEN_MAX: {
        struct kernel_rlimit limit;
#if defined(__ARM_EABI__)
        return LSS_NAME(ugetrlimit)(RLIMIT_NOFILE, &limit) < 0
            ? 8192 : limit.rlim_cur;
#else
        return LSS_NAME(getrlimit)(RLIMIT_NOFILE, &limit) < 0
            ? 8192 : limit.rlim_cur;
#endif
      }
      case _SC_PAGESIZE:
        return __getpagesize();
      default:
        LSS_ERRNO = ENOSYS;
        return -1;
    }
  }
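  /* Illustrative sketch (not part of this header's API): the emulation above
   * only understands _SC_OPEN_MAX and _SC_PAGESIZE; any other name fails
   * with ENOSYS, so callers should be prepared to fall back to libc:
   *
   *   int page = sys_sysconf(_SC_PAGESIZE);
   *   int fds  = sys_sysconf(_SC_OPEN_MAX);   // 8192 if getrlimit fails
   *   if (sys_sysconf(_SC_NPROCESSORS_ONLN) < 0) {
   *     // not supported here; use sysconf(3) from libc instead
   *   }
   */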
#if defined(__x86_64__)
  /* Need to make sure loff_t isn't truncated to 32-bits under x32. */
  LSS_INLINE ssize_t LSS_NAME(pread64)(int f, void *b, size_t c, loff_t o) {
    LSS_BODY(4, ssize_t, pread64, LSS_SYSCALL_ARG(f), LSS_SYSCALL_ARG(b),
             LSS_SYSCALL_ARG(c), (uint64_t)(o));
  }

  LSS_INLINE ssize_t LSS_NAME(pwrite64)(int f, const void *b, size_t c,
                                        loff_t o) {
    LSS_BODY(4, ssize_t, pwrite64, LSS_SYSCALL_ARG(f), LSS_SYSCALL_ARG(b),
             LSS_SYSCALL_ARG(c), (uint64_t)(o));
  }

  LSS_INLINE int LSS_NAME(readahead)(int f, loff_t o, unsigned c) {
    LSS_BODY(3, int, readahead, LSS_SYSCALL_ARG(f), (uint64_t)(o),
             LSS_SYSCALL_ARG(c));
  }
#elif defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI64
  LSS_INLINE _syscall4(ssize_t, pread64, int, f,
                       void *, b, size_t, c,
                       loff_t, o)
  LSS_INLINE _syscall4(ssize_t, pwrite64, int, f,
                       const void *, b, size_t, c,
                       loff_t, o)
  LSS_INLINE _syscall3(int, readahead, int, f,
                       loff_t, o, unsigned, c)
  LSS_INLINE _syscall6(void *, mmap, void *, addr, size_t, length, int, prot,
                       int, flags, int, fd, int64_t, offset)
#else
#define __NR__pread64 __NR_pread64
#define __NR__pwrite64 __NR_pwrite64
#define __NR__readahead __NR_readahead
#if defined(__ARM_EABI__) || defined(__mips__)
  /* On ARM and MIPS, a 64-bit parameter has to be in an even-odd register
   * pair. Hence these calls ignore their fourth argument (r3) so that their
   * fifth and sixth make such a pair (r4,r5).
   */
#define LSS_LLARG_PAD 0,
  LSS_INLINE _syscall6(ssize_t, _pread64, int, f,
                       void *, b, size_t, c,
                       unsigned, skip, unsigned, o1, unsigned, o2)
  LSS_INLINE _syscall6(ssize_t, _pwrite64, int, f,
                       const void *, b, size_t, c,
                       unsigned, skip, unsigned, o1, unsigned, o2)
  LSS_INLINE _syscall5(int, _readahead, int, f,
                       unsigned, skip,
                       unsigned, o1, unsigned, o2, size_t, c)
#else
#define LSS_LLARG_PAD
  LSS_INLINE _syscall5(ssize_t, _pread64, int, f,
                       void *, b, size_t, c, unsigned, o1,
                       unsigned, o2)
  LSS_INLINE _syscall5(ssize_t, _pwrite64, int, f,
                       const void *, b, size_t, c, unsigned, o1,
                       long, o2)
  LSS_INLINE _syscall4(int, _readahead, int, f,
                       unsigned, o1, unsigned, o2, size_t, c)
#endif
  /* We force 64bit-wide parameters onto the stack, then access each
   * 32-bit component individually. This guarantees that we build the
   * correct parameters independent of the native byte-order of the
   * underlying architecture.
   */
  LSS_INLINE ssize_t LSS_NAME(pread64)(int fd, void *buf, size_t count,
                                       loff_t off) {
    union { loff_t off; unsigned arg[2]; } o = { off };
    return LSS_NAME(_pread64)(fd, buf, count,
                              LSS_LLARG_PAD o.arg[0], o.arg[1]);
  }
  LSS_INLINE ssize_t LSS_NAME(pwrite64)(int fd, const void *buf,
                                        size_t count, loff_t off) {
    union { loff_t off; unsigned arg[2]; } o = { off };
    return LSS_NAME(_pwrite64)(fd, buf, count,
                               LSS_LLARG_PAD o.arg[0], o.arg[1]);
  }
  LSS_INLINE int LSS_NAME(readahead)(int fd, loff_t off, int len) {
    union { loff_t off; unsigned arg[2]; } o = { off };
    return LSS_NAME(_readahead)(fd, LSS_LLARG_PAD o.arg[0], o.arg[1], len);
  }
#endif
#endif

#if defined(__aarch64__)
  LSS_INLINE _syscall3(int, dup3, int, s, int, d, int, f)
  LSS_INLINE _syscall6(void *, mmap, void *, addr, size_t, length, int, prot,
                       int, flags, int, fd, int64_t, offset)
  LSS_INLINE _syscall4(int, newfstatat, int, dirfd, const char *, pathname,
                       struct kernel_stat *, buf, int, flags)
  LSS_INLINE _syscall2(int, pipe2, int *, pipefd, int, flags)
  LSS_INLINE _syscall5(int, ppoll, struct kernel_pollfd *, u,
                       unsigned int, n, const struct kernel_timespec *, t,
                       const struct kernel_sigset_t *, sigmask, size_t, s)
  LSS_INLINE _syscall4(int, readlinkat, int, d, const char *, p, char *, b,
                       size_t, s)
#endif

/*
 * Polyfills for deprecated syscalls.
 */

#if defined(__aarch64__)
  LSS_INLINE int LSS_NAME(dup2)(int s, int d) {
    return LSS_NAME(dup3)(s, d, 0);
  }

  LSS_INLINE int LSS_NAME(open)(const char *pathname, int flags, int mode) {
    return LSS_NAME(openat)(AT_FDCWD, pathname, flags, mode);
  }

  LSS_INLINE int LSS_NAME(unlink)(const char *pathname) {
    return LSS_NAME(unlinkat)(AT_FDCWD, pathname, 0);
  }

  LSS_INLINE int LSS_NAME(readlink)(const char *pathname, char *buffer,
                                    size_t size) {
    return LSS_NAME(readlinkat)(AT_FDCWD, pathname, buffer, size);
  }

  LSS_INLINE pid_t LSS_NAME(pipe)(int *pipefd) {
    return LSS_NAME(pipe2)(pipefd, 0);
  }

  LSS_INLINE int LSS_NAME(poll)(struct kernel_pollfd *fds, unsigned int nfds,
                                int timeout) {
    struct kernel_timespec timeout_ts;
    struct kernel_timespec *timeout_ts_p = NULL;

    if (timeout >= 0) {
      timeout_ts.tv_sec = timeout / 1000;
      timeout_ts.tv_nsec = (timeout % 1000) * 1000000;
      timeout_ts_p = &timeout_ts;
    }
    return LSS_NAME(ppoll)(fds, nfds, timeout_ts_p, NULL, 0);
  }

  LSS_INLINE int LSS_NAME(stat)(const char *pathname,
                                struct kernel_stat *buf) {
    return LSS_NAME(newfstatat)(AT_FDCWD, pathname, buf, 0);
  }
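  /* Illustrative sketch (not part of this header's API): the poll() polyfill
   * above converts a millisecond timeout into a kernel_timespec for ppoll,
   * and a negative timeout means "wait forever", just like the native poll
   * syscall on other architectures. POLLIN comes from <poll.h>, which this
   * header does not include, and "fd" is a hypothetical descriptor:
   *
   *   struct kernel_pollfd pfd = { fd, POLLIN, 0 };
   *   int n = sys_poll(&pfd, 1, 500);              // wait up to 500ms
   *   if (n > 0 && (pfd.revents & POLLIN)) {
   *     // data is ready on fd
   *   }
   */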
  LSS_INLINE pid_t LSS_NAME(fork)(void) {
    // No fork syscall on aarch64 - implement it by means of the clone syscall.
    // Note that this does not reset glibc's cached view of the PID/TID, so
    // some glibc interfaces might go wrong in the forked subprocess.
    int flags = SIGCHLD;
    void *child_stack = NULL;
    void *parent_tidptr = NULL;
    void *newtls = NULL;
    void *child_tidptr = NULL;

    LSS_REG(0, flags);
    LSS_REG(1, child_stack);
    LSS_REG(2, parent_tidptr);
    LSS_REG(3, newtls);
    LSS_REG(4, child_tidptr);
    LSS_BODY(pid_t, clone, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3),
             "r"(__r4));
  }
#endif

#ifdef __ANDROID__
  /* These restore the original values of these macros saved by the
   * corresponding #pragma push_macro near the top of this file. */
# pragma pop_macro("stat64")
# pragma pop_macro("fstat64")
# pragma pop_macro("lstat64")
#endif

#if defined(__cplusplus) && !defined(SYS_CPLUSPLUS)
}
#endif

#endif
#endif