#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define CLI_STRING	"cli"
#define STI_STRING	"sti"
#define CLI_STI_CLOBBERS
#define CLI_STI_INPUT_ARGS
#endif /* CONFIG_PARAVIRT */

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
	return *(volatile signed char *)(&(x)->slock) <= 0;
}

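/*
 * The lock word is a signed byte: 1 means unlocked, 0 or negative means
 * locked.  A rough C sketch of the asm below (not the real thing; the
 * real decrement is atomic thanks to the LOCK prefix):
 *
 *	for (;;) {
 *		if (--lock->slock >= 0)		// LOCK decb ; jns 3f
 *			break;			// got it
 *		while (lock->slock <= 0)	// cmpb $0 ; jle 2b
 *			cpu_relax();		// rep;nop
 *	}
 */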
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	asm volatile("\n1:\t"
		     LOCK_PREFIX " ; decb %0\n\t"
		     "jns 3f\n"
		     "2:\t"
		     "rep;nop\n\t"
		     "cmpb $0,%0\n\t"
		     "jle 2b\n\t"
		     "jmp 1b\n"
		     "3:\n\t"
		     : "+m" (lock->slock) : : "memory");
}

/*
 * It is easier for the lock validator if interrupts are not re-enabled
 * in the middle of a lock acquire.  Re-enabling them there is only a
 * performance optimization anyway, so we turn it off:
 *
 * NOTE: there's an irqs-on section here, which normally would have to be
 * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
 */
#ifndef CONFIG_PROVE_LOCKING
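/*
 * Spin with interrupts re-enabled while waiting, if the caller had them
 * enabled.  Bit 9 (0x200) of the saved flags is the EFLAGS IF bit: if it
 * was set we execute sti before spinning and cli again before retrying
 * the locked decrement; otherwise we spin with interrupts off.
 */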
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; decb %[slock]\n\t"
		"jns 5f\n"
		"2:\t"
		"testl $0x200, %[flags]\n\t"
		"jz 4f\n\t"
		STI_STRING "\n"
		"3:\t"
		"rep;nop\n\t"
		"cmpb $0, %[slock]\n\t"
		"jle 3b\n\t"
		CLI_STRING "\n\t"
		"jmp 1b\n"
		"4:\t"
		"rep;nop\n\t"
		"cmpb $0, %[slock]\n\t"
		"jg 1b\n\t"
		"jmp 4b\n"
		"5:\n\t"
		: [slock] "+m" (lock->slock)
		: [flags] "r" (flags)
		  CLI_STI_INPUT_ARGS
		: "memory" CLI_STI_CLOBBERS);
}
#endif

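/*
 * Try-lock: atomically exchange the lock byte with 0.  An xchg with a
 * memory operand is implicitly locked, so no LOCK prefix is needed.
 * The old value is positive (i.e. 1) only if the lock was free, in
 * which case we now own it; otherwise storing 0 leaves an already-held
 * lock locked (<= 0).
 */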
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	char oldval;
	asm volatile(
		"xchgb %b0,%1"
		:"=q" (oldval), "+m" (lock->slock)
		:"0" (0) : "memory");
	return oldval > 0;
}

/*
 * __raw_spin_unlock based on writing $1 to the low byte.
 * This method works despite all the confusion, except on PPro SMP
 * or if we are using OOSTORE, where we use xchgb instead.
 * (PPro errata 66, 92)
 */

#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory");
}

#else

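/*
 * On PPro SMP or with out-of-order stores a plain movb release is not
 * safe (see the errata note above), so release the lock with xchgb,
 * which is implicitly locked and acts as a full memory barrier.
 */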
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	char oldval = 1;

	asm volatile("xchgb %b0, %1"
		     : "=q" (oldval), "+m" (lock->slock)
		     : "0" (oldval) : "memory");
}

#endif

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious, so think it through carefully.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 *
 * The helpers are in arch/i386/kernel/semaphore.c
 */

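/*
 * Rough picture of the counter (RW_LOCK_BIAS is 0x01000000 in this
 * tree, see asm/rwlock.h); readers subtract 1, a writer subtracts the
 * whole bias:
 *
 *	lock == RW_LOCK_BIAS		completely unlocked
 *	0 < lock < RW_LOCK_BIAS		held by (RW_LOCK_BIAS - lock) readers
 *	lock == 0			held by one writer
 *	lock < 0			contended (sign bit set)
 */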
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

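/*
 * The read/write lock fast paths are a single locked subl; on failure
 * we call the out-of-line helpers (__read_lock_failed and
 * __write_lock_failed), which in this tree expect the rwlock pointer
 * in %eax - hence the "a" register constraint below.
 */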
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::"a" (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::"a" (rw) : "memory");
}

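/*
 * The trylock variants treat the counter as an atomic_t: speculatively
 * subtract (1 for a reader, RW_LOCK_BIAS for a writer) and, if the
 * result shows the lock was not available, add the same amount back
 * and report failure.
 */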
static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
				 : "+m" (rw->lock) : : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */