#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#define ADDR (*(volatile long *) addr)

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
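
/*
 * Usage sketch (illustrative only, not part of this header): set_bit()
 * treats @addr as the start of a bitmap, so @nr may index past the first
 * word. The array and bit numbers below are made up for illustration.
 */
#if 0
static unsigned long example_bitmap[2];		/* 64 bits across two words */

static void example_set_bits(void)
{
	set_bit(0, example_bitmap);		/* LSB of example_bitmap[0] */
	set_bit(33, example_bitmap);		/* bit 1 of example_bitmap[1] */
}
#endif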

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long * addr)
{
	__asm__(
		"btsl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static inline void __clear_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
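
/*
 * Usage sketch (illustrative only, not part of this header): when clear_bit()
 * is used to release a lock-like bit, smp_mb__before_clear_bit() is the
 * documented way to make earlier changes visible on other processors before
 * the bit is cleared. The lock word and bit number are hypothetical.
 */
#if 0
static unsigned long example_lock_word;

static void example_release(void)
{
	smp_mb__before_clear_bit();		/* order prior stores first */
	clear_bit(0, &example_lock_word);	/* drop the lock bit */
}
#endif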

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered on x86. It may be
 * reordered on other architectures.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86.
 * It may be reordered on other architectures.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}
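
/*
 * Usage sketch (illustrative only, not part of this header): a minimal
 * lock bit built from test_and_set_bit_lock()/clear_bit_unlock(). The names
 * and the bare spin loop are hypothetical; real kernel code would normally
 * use the established bit-spinlock helpers and cpu_relax() while spinning.
 */
#if 0
static unsigned long example_lock_bits;

static void example_bit_lock(void)
{
	while (test_and_set_bit_lock(0, &example_lock_bits))
		;			/* bit already set: spin until released */
}

static void example_bit_unlock(void)
{
	clear_bit_unlock(0, &example_lock_bits);
}
#endif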

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long * addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86.
 * It may be reordered on other architectures.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif

static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}

static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
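
/*
 * Usage sketch (illustrative only, not part of this header): test_bit()
 * expands to the C version when the bit number is a compile-time constant
 * and to the btl-based version otherwise. The calls below are hypothetical.
 */
#if 0
static unsigned long example_map[2];

static int example_test(int nr)
{
	int a = test_bit(7, example_map);	/* constant nr: constant_test_bit() */
	int b = test_bit(nr, example_map);	/* variable nr: variable_test_bit() */

	return a || b;
}
#endif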

#undef ADDR

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
{
	int d0, d1, d2;
	int res;

	if (!size)
		return 0;
	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"movl $-1,%%eax\n\t"
		"xorl %%edx,%%edx\n\t"
		"repe; scasl\n\t"
		"je 1f\n\t"
		"xorl -4(%%edi),%%eax\n\t"
		"subl $4,%%edi\n\t"
		"bsfl %%eax,%%edx\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%edx"
		:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
	return res;
}
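
/*
 * Usage sketch (illustrative only, not part of this header): a common
 * pattern is to look for a free slot and then claim it atomically, retrying
 * if another CPU took the same bit in the meantime. Names and sizes are
 * hypothetical.
 */
#if 0
#define EXAMPLE_NR_SLOTS 64

static unsigned long example_slot_map[EXAMPLE_NR_SLOTS / 32];

static int example_alloc_slot(void)
{
	int slot;

	do {
		slot = find_first_zero_bit(example_slot_map, EXAMPLE_NR_SLOTS);
		if (slot >= EXAMPLE_NR_SLOTS)
			return -1;		/* bitmap full */
	} while (test_and_set_bit(slot, example_slot_map));

	return slot;
}
#endif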

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
int find_next_zero_bit(const unsigned long *addr, int size, int offset);

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
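
/*
 * Usage sketch (illustrative only, not part of this header): __ffs() returns
 * the index of the lowest set bit and is undefined for 0, so callers test for
 * zero first. The function name is made up; the value in the comment is plain
 * arithmetic.
 */
#if 0
static unsigned long example_lowest_set(unsigned long word)
{
	if (!word)
		return 0;		/* __ffs(0) is undefined, handle it first */
	return __ffs(word);		/* e.g. __ffs(0x58) == 3 */
}
#endif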

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
{
	unsigned x = 0;

	while (x < size) {
		unsigned long val = *addr++;
		if (val)
			return __ffs(val) + x;
		x += (sizeof(*addr)<<3);
	}
	return x;
}

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
int find_next_bit(const unsigned long *addr, int size, int offset);

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz() (man ffs).
 */
static inline int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}

/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs().
 */
static inline int fls(int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}
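
/*
 * Usage sketch (illustrative only, not part of this header): unlike __ffs(),
 * ffs() and fls() use 1-based bit numbers and return 0 for a zero argument
 * (the libc ffs() convention). The values below are plain arithmetic.
 */
#if 0
static int example_ffs_fls(void)
{
	return ffs(0)		/* == 0: no bit set */
	     + ffs(0x10)	/* == 5: lowest set bit is bit 4 */
	     + fls(0x10);	/* == 5: highest set bit is bit 4 */
}
#endif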

#include <asm-generic/bitops/hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(lock,nr,addr) \
	test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr,addr) \
	test_and_clear_bit((nr),(unsigned long*)addr)

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */