/*
 * asm-generic/mutex-xchg.h
 *
 * Generic implementation of the mutex fastpath, based on xchg().
 *
 * NOTE: An xchg based implementation might be less optimal than an atomic
 *       decrement/increment based implementation. If your architecture
 *       has a reasonable atomic dec/inc then you should probably use
 *       asm-generic/mutex-dec.h instead, or you could open-code an
 *       optimized version in asm/mutex.h.
 */
#ifndef _ASM_GENERIC_MUTEX_XCHG_H
#define _ASM_GENERIC_MUTEX_XCHG_H

/**
 * __mutex_fastpath_lock - try to take the lock by moving the count
 *                         from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function MUST leave the value lower than 1
 * even when the "1" assertion wasn't true.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_xchg(count, 0) != 1))
		fail_fn(count);
	else
		smp_mb();
}

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or whatever the slow path function returns otherwise.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_xchg(count, 0) != 1))
		return fail_fn(count);
	else {
		smp_mb();
		return 0;
	}
}

/**
 * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value to
 * 1, or to set it to a value lower than one.
 * If the implementation sets it to a value lower than one, the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1; otherwise
 * it needs to return 0.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
	smp_mb();
	if (unlikely(atomic_xchg(count, 1) != 0))
		fail_fn(count);
}

#define __mutex_slowpath_needs_to_unlock()		0

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 * @count: pointer of type atomic_t
 * @fail_fn: spinlock based trylock implementation
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not leave
 * it at 0 on failure.
 *
 * If the architecture has no effective trylock variant, it should call the
 * <fail_fn> spinlock-based trylock variant unconditionally.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int prev = atomic_xchg(count, 0);

	if (unlikely(prev < 0)) {
		/*
		 * The lock was marked contended so we must restore that
		 * state. If while doing so we get back a prev value of 1
		 * then we just own it.
		 *
		 * [ In the rare case of the mutex going to 1, to 0, to -1
		 *   and then back to 0 in this few-instructions window,
		 *   this has the potential to trigger the slowpath for the
		 *   owner's unlock path needlessly, but that's not a problem
		 *   in practice. ]
		 */
		prev = atomic_xchg(count, prev);
		if (prev < 0)
			prev = 0;
	}
	smp_mb();

	return prev;
}

#endif
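/*
 * Usage sketch (illustrative only, not part of this header): the generic
 * mutex core in kernel/mutex.c is the intended consumer of these fastpath
 * helpers. Roughly, assuming a struct mutex with an atomic_t 'count' and
 * the usual slowpath entry points (the exact prototypes may differ):
 *
 *	void fastcall __sched mutex_lock(struct mutex *lock)
 *	{
 *		might_sleep();
 *		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *	}
 *
 *	void fastcall __sched mutex_unlock(struct mutex *lock)
 *	{
 *		__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 *	}
 *
 * The count convention consumed here is: 1 means unlocked, 0 means locked
 * with no waiters, and a negative value means locked with possible waiters,
 * which is why the unlock failure case may have to fall into the slowpath
 * to wake them up.
 */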