
Lines Matching full:lock

12  * won the lock, so we try exclusively storing it.  A memory barrier
13 * is required after we get a lock, and before we release it, because
20 #define __raw_spin_is_locked(x) ((x)->lock != 0)
21 #define __raw_spin_unlock_wait(lock) \
22 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
24 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
26 static inline void __raw_spin_lock(raw_spinlock_t *lock)
40 : "r" (&lock->lock), "r" (1)
46 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
55 : "r" (&lock->lock), "r" (1)
66 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
77 : "r" (&lock->lock), "r" (0)
86 * just write zero since the lock is exclusively held.
104 : "r" (&rw->lock), "r" (0x80000000)
119 : "r" (&rw->lock), "r" (0x80000000)
141 : "r" (&rw->lock), "r" (0)
146 #define __raw_write_can_lock(x) ((x)->lock == 0x80000000)
150 * - Exclusively load the lock value.
152 * - Store new lock value if positive, and we still own this location.
174 : "r" (&rw->lock)
198 : "r" (&rw->lock)
211 : "r" (&rw->lock)
219 #define __raw_read_can_lock(x) ((x)->lock < 0x80000000)