/*
 * Copyright (c) 2016 Cyril Hrubis <chrubis (at) suse.cz>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef TST_ATOMIC_H__
#define TST_ATOMIC_H__

#include "config.h"

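/*
 * Descriptive note: tst_atomic_add_return(i, v) atomically adds i to *v
 * and returns the resulting value of *v; each implementation below is
 * intended to act as a full memory barrier.
 */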
#if HAVE_SYNC_ADD_AND_FETCH == 1
static inline int tst_atomic_add_return(int i, int *v)
{
	return __sync_add_and_fetch(v, i);
}

#elif defined(__i386__) || defined(__x86_64__)
static inline int tst_atomic_add_return(int i, int *v)
{
	int __ret = i;

	/*
	 * taken from arch/x86/include/asm/cmpxchg.h
	 * Since we always pass an int-sized parameter, we can simplify it
	 * and cherry-pick only that specific case.
	 *
	switch (sizeof(*v)) {
	case 1:
		asm volatile ("lock; xaddb %b0, %1\n"
			: "+q" (__ret), "+m" (*v) : : "memory", "cc");
		break;
	case 2:
		asm volatile ("lock; xaddw %w0, %1\n"
			: "+r" (__ret), "+m" (*v) : : "memory", "cc");
		break;
	case 4:
		asm volatile ("lock; xaddl %0, %1\n"
			: "+r" (__ret), "+m" (*v) : : "memory", "cc");
		break;
	case 8:
		asm volatile ("lock; xaddq %q0, %1\n"
			: "+r" (__ret), "+m" (*v) : : "memory", "cc");
		break;
	default:
		__xadd_wrong_size();
	}
	*/
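	/*
	 * lock xaddl stores the old value of *v in __ret and adds the
	 * original __ret (== i) to *v, so i + __ret below is the updated
	 * value of *v.
	 */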
	asm volatile ("lock; xaddl %0, %1\n"
		: "+r" (__ret), "+m" (*v) : : "memory", "cc");

	return i + __ret;
}

#elif defined(__powerpc__) || defined(__powerpc64__)
static inline int tst_atomic_add_return(int i, int *v)
{
	int t;

	/* taken from arch/powerpc/include/asm/atomic.h */
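	/*
	 * lwarx/stwcx. form a load-reserve/store-conditional loop that
	 * retries (bne- 1b) if *v was modified in between; the sync
	 * instructions before and after provide full barrier semantics.
	 */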
	asm volatile(
		"	sync\n"
		"1:	lwarx	%0,0,%2		# atomic_add_return\n"
		"	add %0,%1,%0\n"
		"	stwcx.	%0,0,%2 \n"
		"	bne-	1b\n"
		"	sync\n"
		: "=&r" (t)
		: "r" (i), "r" (v)
		: "cc", "memory");

	return t;
}

#elif defined(__s390__) || defined(__s390x__)
static inline int tst_atomic_add_return(int i, int *v)
{
	int old_val, new_val;

	/* taken from arch/s390/include/asm/atomic.h */
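	/*
	 * Compare-and-swap loop: load *v into old_val, compute
	 * new_val = old_val + i, then cs stores new_val only if *v still
	 * equals old_val; otherwise old_val is reloaded and jl retries.
	 */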
	asm volatile(
		"	l	%0,%2\n"
		"0:	lr	%1,%0\n"
		"	ar	%1,%3\n"
		"	cs	%0,%1,%2\n"
		"	jl	0b"
		: "=&d" (old_val), "=&d" (new_val), "+Q" (*v)
		: "d" (i)
		: "cc", "memory");

	return old_val + i;
}
#else /* HAVE_SYNC_ADD_AND_FETCH == 1 */
# error Your compiler does not provide __sync_add_and_fetch and the LTP\
	implementation is missing for your architecture.
#endif

static inline int tst_atomic_inc(int *v)
{
	return tst_atomic_add_return(1, v);
}

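/*
 * Illustrative usage sketch (assumes a hypothetical worker() thread
 * function and a shared counter):
 *
 *	static int counter;
 *
 *	static void *worker(void *arg)
 *	{
 *		tst_atomic_inc(&counter);
 *		return NULL;
 *	}
 *
 * Concurrent calls to tst_atomic_inc() never lose updates, so counter
 * ends up equal to the number of worker threads that ran.
 */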
#endif	/* TST_ATOMIC_H__ */