#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bitops.h> /* for LOCK_PREFIX */

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 *       need to test for the feature in boot_cpu_data.
 */
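
/*
 * A minimal sketch of such a feature test, assuming the
 * X86_FEATURE_CX8 bit and the cpu_has() helper of this kernel
 * generation (adjust to your tree):
 *
 *	if (cpu_has(&boot_cpu_data, X86_FEATURE_CX8))
 *		set_64bit(p, val);
 *	else
 *		... fall back to a lock-protected store ...
 */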

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

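/*
 * The oversized dummy struct below makes gcc treat the pointed-to
 * object as potentially very large, so an "m" operand built with
 * __xg() covers the whole object rather than just its first word,
 * and the compiler cannot cache it across the asm.
 */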
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * The semantics of CMPXCHG8B are a bit strange, which is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside it. This inlines well in most cases; the cached
 * cost is around 38 cycles. (In the future we might want
 * to do a SIMD/3DNow!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to take.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically; see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see a coherent 64-bit value.
 */
static inline void __set_64bit(unsigned long long *ptr,
			       unsigned int low, unsigned int high)
{
	__asm__ __volatile__ (
		"\n1:\t"
		"movl (%0), %%eax\n\t"
		"movl 4(%0), %%edx\n\t"
		LOCK_PREFIX "cmpxchg8b (%0)\n\t"
		"jnz 1b"
		: /* no outputs */
		: "D" (ptr),
		  "b" (low),
		  "c" (high)
		: "ax", "dx", "memory");
}

static inline void __set_64bit_constant(unsigned long long *ptr,
					unsigned long long value)
{
	__set_64bit(ptr, (unsigned int)(value), (unsigned int)((value) >> 32ULL));
}
#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)

static inline void __set_64bit_var(unsigned long long *ptr,
				   unsigned long long value)
{
	__set_64bit(ptr, ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 *	  but generally the primitive is invalid; *ptr is an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
				     : "=q" (x)
				     : "m" (*__xg(ptr)), "0" (x)
				     : "memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
				     : "=r" (x)
				     : "m" (*__xg(ptr)), "0" (x)
				     : "memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %0,%1"
				     : "=r" (x)
				     : "m" (*__xg(ptr)), "0" (x)
				     : "memory");
		break;
	}
	return x;
}
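
/*
 * Usage sketch (illustrative only): xchg() as a test-and-set lock
 * primitive.  'lock_word' is a hypothetical variable, not part of
 * this header; real locking should come from <asm/spinlock.h>.
 *
 *	static volatile unsigned long lock_word;
 *
 *	while (xchg(&lock_word, 1) != 0)
 *		cpu_relax();		// spin until we stored the 1
 *	... critical section ...
 *	lock_word = 0;			// release
 */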

/*
 * Atomic compare and exchange.  Compare OLD with MEM; if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
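
/*
 * Usage sketch: the canonical retry loop for a lock-free update, here
 * an increment of a hypothetical counter (not defined in this header):
 *
 *	unsigned long old, cur = counter;
 *	do {
 *		old = cur;
 *		cur = cmpxchg(&counter, old, old + 1);
 *	} while (cur != old);	// retry if another CPU got in between
 */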

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#define sync_cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#define cmpxchg_local(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

/*
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * isn't.
 */
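
/*
 * Usage sketch (illustrative only): claiming a slot in a structure
 * shared with the hypervisor.  'ring->owner', FREE and MY_ID are
 * hypothetical names, not defined in this header; the point is that
 * sync_cmpxchg() keeps the lock prefix even in a UP-configured guest.
 *
 *	if (sync_cmpxchg(&ring->owner, FREE, MY_ID) == FREE)
 *		... the slot is ours ...
 */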
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
					   unsigned long old,
					   unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__("lock; cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

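/*
 * Non-locked variant: only safe when no other CPU can touch *ptr
 * concurrently, e.g. for per-CPU data or on UP-only code paths.
 */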
static inline unsigned long __cmpxchg_local(volatile void *ptr,
			unsigned long old, unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__("cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__("cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__("cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be
 * necessary to simulate cmpxchg on the 80386 CPU, since it lacks
 * the instruction.  For that purpose we define a function for each
 * of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
					(unsigned long)(n), sizeof(*(ptr))); \
	else								\
		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
					(unsigned long)(n), sizeof(*(ptr))); \
	__ret;								\
})
#define cmpxchg_local(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = __cmpxchg_local((ptr), (unsigned long)(o),	\
					(unsigned long)(n), sizeof(*(ptr))); \
	else								\
		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
					(unsigned long)(n), sizeof(*(ptr))); \
	__ret;								\
})
#endif

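/*
 * 64-bit compare and exchange via CMPXCHG8B.  The instruction needs
 * the CX8 feature bit (Pentium and later); per the note at the top of
 * this file, test for it in boot_cpu_data before calling.  The 64-bit
 * operands travel in register pairs: old/prev in edx:eax ("=A"),
 * new in ecx:ebx.
 */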
static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
					     unsigned long long new)
{
	unsigned long long prev;
	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
			     : "=A"(prev)
			     : "b"((unsigned long)new),
			       "c"((unsigned long)(new >> 32)),
			       "m"(*__xg(ptr)),
			       "0"(old)
			     : "memory");
	return prev;
}

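/*
 * Non-locked variant of __cmpxchg64(); same restrictions as
 * __cmpxchg_local() above.
 */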
static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
			unsigned long long old, unsigned long long new)
{
	unsigned long long prev;
	__asm__ __volatile__("cmpxchg8b %3"
			     : "=A"(prev)
			     : "b"((unsigned long)new),
			       "c"((unsigned long)(new >> 32)),
			       "m"(*__xg(ptr)),
			       "0"(old)
			     : "memory");
	return prev;
}

#define cmpxchg64(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
					(unsigned long long)(n)))
#define cmpxchg64_local(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
					(unsigned long long)(n)))
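
/*
 * Usage sketch: a 64-bit retry loop on 32-bit hardware, updating a
 * hypothetical u64 'stamp' (not defined here).  Same pattern as
 * cmpxchg(), just with 64-bit operands:
 *
 *	unsigned long long old, cur = stamp;
 *	do {
 *		old = cur;
 *		cur = cmpxchg64(&stamp, old, old + 1);
 *	} while (cur != old);
 */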
#endif /* __ASM_CMPXCHG_H */