/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight
 * than rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */
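
/*
 * Illustrative sketch: the usual pairing is a write barrier on the
 * producer side and a dependency barrier on the consumer side when
 * publishing a pointer to freshly initialised data (the names "msg"
 * and "msg_ptr" below are hypothetical):
 *
 * <programlisting>
 *	producer			consumer
 *
 *	msg->len = 42;			q = msg_ptr;
 *	smp_wmb();			smp_read_barrier_depends();
 *	msg_ptr = msg;			len = q->len;
 * </programlisting>
 *
 * On MIPS both read_barrier_depends() variants expand to empty
 * statements; they document the dependency and keep such code portable
 * to CPUs (e.g. Alpha) where the barrier is not free.
 */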

#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

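/*
 * __sync() is the full hardware ordering barrier: on CPUs that have the
 * SYNC instruction it emits one (".set mips2" lets the assembler accept
 * SYNC even when building for an older ISA); on CPUs without it,
 * __sync() is an empty statement.
 */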
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		".set	mips2\n\t"		\
		"sync\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: /* no input */		\
		: "memory")
#else
#define __sync()	do { } while(0)
#endif

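/*
 * __fast_iob() performs a throwaway uncached load (lw into $0 from
 * KSEG1) so that previously posted writes are pushed out of the CPU's
 * write buffer before execution continues; it is combined with __sync()
 * in the I/O barrier below.
 */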
#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")

#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
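
/*
 * fast_iob() orders memory accesses with respect to I/O.  On SGI IP28
 * it reads a board-specific uncached address on either side of a SYNC
 * (a workaround particular to that machine); everywhere else it simply
 * combines __sync() with the dummy uncached load above.
 */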
#ifdef CONFIG_SGI_IP28
#define fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"sync\n\t"			\
		"lw	$0,%0\n\t"		\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1ADDR(0x1fa00004)) \
		: "memory")
#else
#define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)
#endif

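/*
 * The generic barriers map onto the primitives above.  When the CPU has
 * a write buffer that must be flushed explicitly (CONFIG_CPU_HAS_WB),
 * mb() and iob() go through wbflush(); otherwise they use the
 * SYNC-based fast variants.  A typical driver-style use of wmb() looks
 * roughly like this (the names "desc", "buf_dma", "GO" and "ctrl_reg"
 * are hypothetical):
 *
 * <programlisting>
 *	desc->addr = buf_dma;		fill the DMA descriptor
 *	wmb();				descriptor must be visible first
 *	writel(GO, ctrl_reg);		then start the device
 * </programlisting>
 */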
#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

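/*
 * On weakly ordered SMP kernels the SMP barriers must emit a real SYNC;
 * on uniprocessor or strongly ordered configurations the asm body is
 * empty and the barriers below degenerate to compiler barriers (the
 * "memory" clobber alone).  __WEAK_LLSC_MB covers CPUs that can also
 * reorder accesses beyond LL/SC sequences.
 */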
#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
#define __WEAK_ORDERING_MB	"       sync	\n"
#else
#define __WEAK_ORDERING_MB	"		\n"
#endif
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
#define __WEAK_LLSC_MB		"       sync	\n"
#else
#define __WEAK_LLSC_MB		"		\n"
#endif

#define smp_mb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
#define smp_rmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
#define smp_wmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")

#define set_mb(var, value) \
	do { var = value; smp_mb(); } while (0)
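
/*
 * set_mb() stores a value and then issues a full SMP barrier, so the
 * store is ordered before everything that follows.  For a hypothetical
 * flag "done":
 *
 *	set_mb(done, 1);	equivalent to: done = 1; smp_mb();
 */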

#define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
#define smp_llsc_rmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
#define smp_llsc_wmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
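
/*
 * The smp_llsc_*() barriers are intended to follow LL/SC sequences,
 * such as those in the atomic and locking code, and only emit a SYNC on
 * CPUs that reorder accesses beyond LL/SC; elsewhere they are pure
 * compiler barriers.
 */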

#endif /* __ASM_BARRIER_H */