#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__


#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3

#define read_cpuid(reg)							\
	({								\
		unsigned int __val;					\
		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val)					\
		    :							\
		    : "cc");						\
		__val;							\
	})
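
/*
 * Illustrative use only (a sketch, not part of the original header):
 * read_cpuid() takes one of the CPUID_* indices defined above, e.g.
 *
 *	unsigned int id    = read_cpuid(CPUID_ID);
 *	unsigned int cache = read_cpuid(CPUID_CACHETYPE);
 *
 * The variable names are arbitrary; __stringify() is expected to come
 * from <linux/stringify.h>.
 */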

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
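
/*
 * Sketch of intended use (illustrative, not taken from this file): paste
 * __asmeq() in front of an asm string that requires its operands in
 * specific registers, so assembly fails if the compiler allocated
 * something else, e.g.
 *
 *	register unsigned long __r0 asm("r0") = arg;
 *	asm volatile(__asmeq("%0", "r0")
 *		     "bl	__some_helper"
 *		     : "+r" (__r0) : : "ip", "lr", "cc");
 *
 * where __some_helper and arg are hypothetical names.
 */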

#ifndef __ASSEMBLY__

#include <linux/linkage.h>

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
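
/*
 * Illustrative use only (not from the original header): xchg() atomically
 * stores a new value at *ptr and returns the previous value; tas() is the
 * classic test-and-set built on top of it.  A hypothetical spin loop:
 *
 *	while (tas(&lock) != 0)
 *		cpu_relax();
 *
 * where `lock' is an unsigned long initialised to 0.
 */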

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	if ((processor_id & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define	cpu_is_xscale()	0
#else
#define	cpu_is_xscale()	1
#endif

#define set_cr(x)					\
	__asm__ __volatile__(				\
	"mcr	p15, 0, %0, c1, c0, 0	@ set CR"	\
	: : "r" (x) : "cc")

#define get_cr()					\
	({						\
	unsigned int __val;				\
	__asm__ __volatile__(				\
	"mrc	p15, 0, %0, c1, c0, 0	@ get CR"	\
	: "=r" (__val) : : "cc");			\
	__val;						\
	})
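
/*
 * Illustrative use only (not from the original header): the CR_* bits
 * defined above are intended to be read-modify-written through these
 * accessors, e.g. to enable alignment fault checking:
 *
 *	unsigned int cr = get_cr();
 *	set_cr(cr | CR_A);
 */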

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 6
#define mb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
                                   : : "r" (0) : "memory")
#else
#define mb() __asm__ __volatile__ ("" : : : "memory")
#endif
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value)  do { var = value; mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
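
/*
 * Illustrative use only (not from the original header, all names
 * hypothetical): set_mb() stores a value and then issues a full barrier,
 * which is the usual way to publish a flag that another observer polls:
 *
 *	buffer = data;
 *	wmb();
 *	set_mb(ready, 1);
 *
 * so the payload write is ordered before the flag becomes visible.
 */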

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

/*
 * CPU interrupt mask handling.
 */
#if __LINUX_ARM_ARCH__ >= 6

#define local_irq_save(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_save\n"	\
	"cpsid	i"						\
	: "=r" (x) : : "memory", "cc");				\
	})

#define local_irq_enable()  __asm__("cpsie i	@ __sti" : : : "memory", "cc")
#define local_irq_disable() __asm__("cpsid i	@ __cli" : : : "memory", "cc")
#define local_fiq_enable()  __asm__("cpsie f	@ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f	@ __clf" : : : "memory", "cc")

#else

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)					\
	({							\
		unsigned long temp;				\
		(void) (&temp == &x);				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_save\n"	\
"	orr	%1, %0, #128\n"					\
"	msr	cpsr_c, %1"					\
	: "=r" (x), "=r" (temp)					\
	:							\
	: "memory", "cc");					\
	})

/*
 * Enable IRQs
 */
#define local_irq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_enable\n"	\
"	bic	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

/*
 * Disable IRQs
 */
#define local_irq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_disable\n"	\
"	orr	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

/*
 * Enable FIQs
 */
#define local_fiq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ stf\n"		\
"	bic	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

/*
 * Disable FIQs
 */
#define local_fiq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ clf\n"		\
"	orr	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

#endif

/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_save_flags"	\
	: "=r" (x) : : "memory", "cc");				\
	})

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)					\
	__asm__ __volatile__(					\
	"msr	cpsr_c, %0		@ local_irq_restore\n"	\
	:							\
	: "r" (x)						\
	: "memory", "cc")

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	(int)(flags & PSR_I_BIT);	\
})
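
/*
 * Illustrative use only (not from the original header): the usual pattern
 * for a short critical section is to save the IRQ state, do the work and
 * then restore whatever state was there before, e.g.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	...touch data shared with an interrupt handler...
 *	local_irq_restore(flags);
 *
 * irqs_disabled() returns non-zero while IRQs are masked (PSR_I_BIT set).
 */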

#ifdef CONFIG_SMP

#define smp_mb()		mb()
#define smp_rmb()		rmb()
#define smp_wmb()		wmb()
#define smp_read_barrier_depends()		read_barrier_depends()

#else

#define smp_mb()		barrier()
#define smp_rmb()		barrier()
#define smp_wmb()		barrier()
#define smp_read_barrier_depends()		do { } while(0)

#endif /* CONFIG_SMP */

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif
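
/*
 * Exchange a value with the byte or word at *ptr and return the old value.
 * ARMv6 uses an ldrex/strex retry loop, StrongARMs with the broken "swp"
 * (see above) emulate the exchange with IRQs disabled, and everything else
 * uses the swp/swpb instructions.  Only sizes 1 and 4 are handled; any
 * other size is reported via __bad_xchg().
 */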
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		local_irq_restore(flags);
		break;

	case 4:
		local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}

extern void disable_hlt(void);
extern void enable_hlt(void);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif /* __ASM_ARM_SYSTEM_H */