#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__
#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

/*
 * Saving eflags is important: it not only switches IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev,next,last) do {					\
	unsigned long esi,edi;						\
	asm volatile("pushfl\n\t"		/* Save flags */	\
		     "pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     "popfl"						\
		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
		      "=a" (last),"=S" (esi),"=D" (edi)			\
		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
		      "2" (prev), "d" (next));				\
} while (0)
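
/*
 * Usage sketch (an assumption about the caller, not the scheduler code
 * itself): the context-switch path is expected to pass "prev" as the
 * "last" argument too, so that after the stack switch the local
 * variable once again names the task this CPU switched away from:
 *
 *	switch_to(prev, next, prev);
 *	... here "prev" refers to the task that ran before us ...
 */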

#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
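
/*
 * Illustrative sketch (the descriptor variable and the values are
 * hypothetical): set_base()/set_limit() patch the base and limit
 * fields of an already-built 8-byte descriptor in place:
 *
 *	struct desc_struct entry;
 *	set_base(entry, 0xc0000000);	bytes 2-3, 4 and 7 receive the base
 *	set_limit(entry, 0x1000);	the limit field is set to 0x1000 - 1
 */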

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"mov %0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"pushl $0\n\t"			\
		"popl %%" #seg "\n\t"		\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 4\n\t"			\
		".long 1b,3b\n"			\
		".previous"			\
		: :"rm" (value))

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
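
/*
 * Typical pairing, sketched from what a 32-bit context-switch path
 * might do (the thread fields here are an assumption, not something
 * defined in this header):
 *
 *	savesegment(gs, prev->gs);		stash the outgoing %gs
 *	if (prev->gs | next->gs)
 *		loadsegment(gs, next->gs);	a bad selector falls back to 0
 */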


static inline void native_clts(void)
{
	asm volatile ("clts");
}

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("movl %0,%%cr0": :"r" (val));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("movl %0,%%cr2": :"r" (val));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("movl %0,%%cr3": :"r" (val));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist */
	asm volatile("1: movl %%cr4, %0		\n"
		"2:				\n"
		".section __ex_table,\"a\"	\n"
		".long 1b,2b			\n"
		".previous			\n"
		: "=r" (val): "0" (0));
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("movl %0,%%cr4": :"r" (val));
}

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}
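
/*
 * clflush is only ordered against fencing instructions, so flushing a
 * range is normally bracketed by full barriers.  A sketch, assuming a
 * hypothetical buf/size pair:
 *
 *	mb();
 *	for (p = buf; p < buf + size; p += boot_cpu_data.x86_clflush_size)
 *		clflush(p);
 *	mb();
 */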

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())

/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif /* CONFIG_PARAVIRT */
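
/*
 * Minimal sketch using the accessors above (illustrative, not the
 * kernel's TLB-flush API): writing CR3 back with its own value flushes
 * the non-global TLB entries on this CPU:
 *
 *	write_cr3(read_cr3());
 */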

/* Set the 'TS' bit */
#define stts() write_cr0(8 | read_cr0())
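
/*
 * clts()/stts() pairing sketch for lazy FPU handling (simplified and
 * hypothetical; the real FPU code tracks more state than this):
 *
 *	stts();		set CR0.TS (bit 3), the next FPU insn will trap
 *	...
 *	clts();		clear CR0.TS before letting the task use the FPU
 */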

#endif	/* __KERNEL__ */

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}
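
/*
 * Example (the selector "seg" is a hypothetical value, not defined
 * here): lsll yields the last valid byte offset of the segment, so
 * get_limit() returns its size:
 *
 *	unsigned long size = get_limit(seg);
 */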

#define nop() __asm__ __volatile__ ("nop")

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */


#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
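
/*
 * Where wmb()/rmb() sit in a producer/consumer pair (the data and flag
 * variables are illustrative):
 *
 *	writer				reader
 *	data = value;			if (flag) {
 *	wmb();					rmb();
 *	flag = 1;				use(data);
 *					}
 */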

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
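
/*
 * set_mb() sketch (the flag below is illustrative): the store is made
 * visible and fully ordered before the condition test that follows, so
 * the classic "publish, then check" pattern does not race:
 *
 *	set_mb(flag, 1);
 *	if (!other_side_ready)
 *		wait_for_other_side();
 */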

#include <linux/irqflags.h>

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
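
/*
 * Caller sketch (the I/O routine is hypothetical): bracket work that
 * cannot tolerate the idle loop sitting in hlt:
 *
 *	disable_hlt();
 *	do_timing_critical_io();
 *	enable_hlt();
 */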

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
void __show_registers(struct pt_regs *, int all);

#endif
    321