#ifndef _LINUX_FUTEX_H
#define _LINUX_FUTEX_H

#include <linux/sched.h>

/* Second argument to futex syscall */

#define FUTEX_WAIT		0
#define FUTEX_WAKE		1
#define FUTEX_FD		2
#define FUTEX_REQUEUE		3
#define FUTEX_CMP_REQUEUE	4
#define FUTEX_WAKE_OP		5
#define FUTEX_LOCK_PI		6
#define FUTEX_UNLOCK_PI		7
#define FUTEX_TRYLOCK_PI	8

/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 */

/*
 * Per-lock list entry - embedded in user-space locks, somewhere close
 * to the futex field. (Note: user-space uses a doubly-linked list to
 * achieve O(1) list add and remove, but the kernel only needs to know
 * about the forward link.)
 *
 * NOTE: this structure is part of the syscall ABI, and must not be
 * changed.
 */
struct robust_list {
	struct robust_list __user *next;
};

/*
 * Per-thread list head:
 *
 * NOTE: this structure is part of the syscall ABI, and must only be
 * changed if the change is first communicated to the glibc folks.
 * (When an incompatible change is made, we'll increase the structure
 * size, which glibc will detect.)
 */
struct robust_list_head {
	/*
	 * The head of the list. Points back to itself if empty:
	 */
	struct robust_list list;

	/*
	 * This relative offset is set by user-space; it gives the kernel
	 * the relative position of the futex field to examine. This way
	 * we keep userspace flexible, to freely shape its data-structure,
	 * without hardcoding any particular offset into the kernel:
	 */
	long futex_offset;

	/*
	 * The death of the thread may race with userspace setting
	 * up a lock's links. So to handle this race, userspace first
	 * sets this field to the address of the to-be-taken lock,
	 * then does the lock acquire, then adds itself to the
	 * list, and then clears this field. Hence the kernel will
	 * always have full knowledge of all locks that the thread
	 * _might_ have taken. We check the owner TID in any case,
	 * so only truly owned locks will be handled.
	 */
	struct robust_list __user *list_op_pending;
};
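
/*
 * For illustration only (an editorial sketch, not part of this ABI):
 * the ordering described above might look as follows in a user-space
 * lock implementation, assuming a hypothetical lock type with a futex
 * word and an embedded struct robust_list link, plus hypothetical
 * helpers acquire_futex() and list_add():
 *
 *	head->list_op_pending = &lock->list;	(1) announce the attempt
 *	acquire_futex(&lock->futex);		(2) take the lock
 *	list_add(&lock->list, &head->list);	(3) link the lock in
 *	head->list_op_pending = NULL;		(4) clear the announcement
 *
 * If the thread dies anywhere between (1) and (4), the kernel can
 * still find the lock via list_op_pending; the TID check done by
 * handle_futex_death() ensures that only locks the thread truly
 * owned are cleaned up.
 */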

/*
 * Are there any waiters for this robust futex:
 */
#define FUTEX_WAITERS		0x80000000

/*
 * The kernel signals via this bit that a thread holding a futex
 * has exited without unlocking the futex. The kernel also does
 * a FUTEX_WAKE on such futexes, after setting the bit, to wake
 * up any possible waiters:
 */
#define FUTEX_OWNER_DIED	0x40000000

/*
 * The rest of the robust-futex field is for the TID:
 */
#define FUTEX_TID_MASK		0x3fffffff

/*
 * This limit protects against a deliberately circular list.
 * (Not worth introducing an rlimit for it.)
 */
#define ROBUST_LIST_LIMIT	2048

long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
	      u32 __user *uaddr2, u32 val2, u32 val3);

extern int
handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);

#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
extern void exit_pi_state_list(struct task_struct *curr);
#else
static inline void exit_robust_list(struct task_struct *curr)
{
}
static inline void exit_pi_state_list(struct task_struct *curr)
{
}
#endif

#define FUTEX_OP_SET		0	/* *(int *)UADDR2 = OPARG; */
#define FUTEX_OP_ADD		1	/* *(int *)UADDR2 += OPARG; */
#define FUTEX_OP_OR		2	/* *(int *)UADDR2 |= OPARG; */
#define FUTEX_OP_ANDN		3	/* *(int *)UADDR2 &= ~OPARG; */
#define FUTEX_OP_XOR		4	/* *(int *)UADDR2 ^= OPARG; */

#define FUTEX_OP_OPARG_SHIFT	8	/* Use (1 << OPARG) instead of OPARG. */

#define FUTEX_OP_CMP_EQ		0	/* if (oldval == CMPARG) wake */
#define FUTEX_OP_CMP_NE		1	/* if (oldval != CMPARG) wake */
#define FUTEX_OP_CMP_LT		2	/* if (oldval < CMPARG) wake */
#define FUTEX_OP_CMP_LE		3	/* if (oldval <= CMPARG) wake */
#define FUTEX_OP_CMP_GT		4	/* if (oldval > CMPARG) wake */
#define FUTEX_OP_CMP_GE		5	/* if (oldval >= CMPARG) wake */

/* FUTEX_WAKE_OP will perform atomically
 *	int oldval = *(int *)UADDR2;
 *	*(int *)UADDR2 = oldval OP OPARG;
 *	if (oldval CMP CMPARG)
 *		wake UADDR2;
 */

#define FUTEX_OP(op, oparg, cmp, cmparg) \
	(((op & 0xf) << 28) | ((cmp & 0xf) << 24) \
	 | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))
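
/*
 * For illustration only (an editorial sketch, not part of this ABI):
 * FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0) encodes to
 *
 *	(1 << 28) | (0 << 24) | (1 << 12) | 0 == 0x10001000
 *
 * i.e. "atomically add 1 to *(int *)UADDR2, and wake if the old
 * value was equal to 0", following the pseudo-code above.
 */

#endif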