#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <asm/types.h>
#include <asm/system.h>

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different spinlock.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
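
/*
 * Illustrative sketch (not part of the original header): because the
 * hash divides the address by L1_CACHE_BYTES, any two words in the same
 * cacheline map to the same lock, so a single spinlock serializes all
 * bitops on that line.  example_lock_for() is a hypothetical helper.
 */
static inline raw_spinlock_t *example_lock_for(volatile unsigned long *addr)
{
	return ATOMIC_HASH(addr);	/* same cacheline => same spinlock */
}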

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	__raw_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
	__raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave().  So you can get NMI events occurring while a
 * *_bit function is holding a spin lock.  If the NMI handler also wants
 * to do bit manipulation (and they do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 *
 * by Keith Owens
 */

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there is no guarantee that this function will not be reordered
 * on architectures other than x86, so portable code must not rely on
 * any ordering guarantee.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
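
/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * a bit number past the first word is legal; BIT_WORD() above steers
 * set_bit() into the right word, so the caller only has to supply a
 * bitmap large enough to contain the bit.
 */
static inline void example_set_bit_70(volatile unsigned long *bitmap)
{
	set_bit(70, bitmap);	/* word 1 on 64-bit, word 2 on 32-bit */
}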

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
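
/*
 * Illustrative sketch of the locking caveat above (hypothetical names):
 * when clear_bit() releases a "busy" bit, insert the barrier first so
 * other CPUs observe the protected stores before they see the bit drop.
 * smp_mb__before_clear_bit() is assumed to be provided by the arch.
 */
static inline void example_unlock_bit(volatile unsigned long *word)
{
	smp_mb__before_clear_bit();	/* order prior stores before the clear */
	clear_bit(0, word);		/* bit 0 plays the role of a lock bit */
}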

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and, on x86, may not be reordered.  On
 * architectures other than x86 it may be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
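
/*
 * Illustrative sketch (hypothetical helper): change_bit() suits cases
 * where the new value is simply "whatever it wasn't", e.g. flipping a
 * polarity flag, and the old value is not needed.
 */
static inline void example_flip_polarity(volatile unsigned long *flags)
{
	change_bit(0, flags);	/* atomically invert bit 0 */
}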

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and, on x86, cannot be reordered.  On
 * architectures other than x86 it may be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
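
/*
 * Illustrative sketch (hypothetical names): the classic claim pattern.
 * Whichever caller sees the bit transition 0->1 owns the work; everyone
 * else backs off, because the set and the read-back of the old value
 * happen under one lock above.
 */
static inline int example_try_claim(volatile unsigned long *word)
{
	return !test_and_set_bit(0, word);	/* nonzero: we got it */
}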

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and, on x86, cannot be reordered.  On
 * architectures other than x86 it may be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
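
/*
 * Illustrative sketch (hypothetical names): consuming a pending flag
 * exactly once.  Only the caller that observes the 1->0 transition
 * proceeds, even if several CPUs race to clear the same bit.
 */
static inline int example_take_pending(volatile unsigned long *pending)
{
	return test_and_clear_bit(0, pending);	/* nonzero: we consumed it */
}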

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
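
/*
 * Illustrative sketch (hypothetical names): toggling a phase bit while
 * learning which phase we left, e.g. for a two-color epoch scheme.
 */
static inline int example_flip_phase(volatile unsigned long *phase)
{
	return test_and_change_bit(0, phase);	/* nonzero: old phase was 1 */
}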

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */