/*
 *  linux/include/asm-arm/page.h
 *
 *  Copyright (C) 1995-2003 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PAGE_H
#define _ASMARM_PAGE_H


/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE-1))
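
/*
 * Illustrative sketch: with PAGE_SHIFT = 12 the constants above work
 * out to PAGE_SIZE == 4096 (0x1000) and, on 32-bit ARM,
 * PAGE_MASK == 0xfffff000.  Masking with PAGE_MASK rounds an address
 * down to its page base; the low 12 bits are the offset within the
 * page.  The example_* helpers below are hypothetical and exist only
 * to show the arithmetic.
 */
#if 0	/* example only, never compiled */
static inline unsigned long example_page_base(unsigned long addr)
{
	return addr & PAGE_MASK;	/* e.g. 0x12345 -> 0x12000 */
}

static inline unsigned long example_page_offset(unsigned long addr)
{
	return addr & ~PAGE_MASK;	/* e.g. 0x12345 -> 0x345 */
}
#endif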

#ifdef __KERNEL__

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
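
/*
 * Illustrative worked example: PAGE_ALIGN() rounds *up* to the next
 * page boundary by adding PAGE_SIZE-1 before masking, so
 * PAGE_ALIGN(0x12345) == 0x13000, while an already aligned address is
 * unchanged, e.g. PAGE_ALIGN(0x13000) == 0x13000.
 */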

#ifndef __ASSEMBLY__

#ifndef CONFIG_MMU

#include "page-nommu.h"

#else

#include <asm/glue.h>

/*
 *	User Space Model
 *	================
 *
 *	This section selects the correct set of functions for dealing with
 *	page-based copying and clearing for user space for the particular
 *	processor(s) we're building for.
 *
 *	We have the following to choose from:
 *	  v3		- ARMv3
 *	  v4wt		- ARMv4 with writethrough cache, without minicache
 *	  v4wb		- ARMv4 with writeback cache, without minicache
 *	  v4_mc		- ARMv4 with minicache
 *	  xscale_mc	- XScale
 *	  xsc3_mc	- XScale v3
 */
#undef _USER
#undef MULTI_USER

#ifdef CONFIG_CPU_COPY_V3
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER v3
# endif
#endif

#ifdef CONFIG_CPU_COPY_V4WT
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER v4wt
# endif
#endif

#ifdef CONFIG_CPU_COPY_V4WB
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER v4wb
# endif
#endif

#ifdef CONFIG_CPU_SA1100
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER v4_mc
# endif
#endif

#ifdef CONFIG_CPU_XSCALE
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER xscale_mc
# endif
#endif

#ifdef CONFIG_CPU_XSC3
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER xsc3_mc
# endif
#endif

#ifdef CONFIG_CPU_COPY_V6
# define MULTI_USER 1
#endif

#if !defined(_USER) && !defined(MULTI_USER)
#error Unknown user operations model
#endif
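
/*
 * Illustrative note: the ladder above selects a compile-time
 * implementation when exactly one user-copy model is configured, and
 * forces run-time dispatch otherwise.  For instance, assuming a kernel
 * built with only CONFIG_CPU_COPY_V4WB enabled, _USER ends up defined
 * as v4wb and MULTI_USER stays undefined; a build enabling more than
 * one of the models (or CONFIG_CPU_COPY_V6 at all) defines MULTI_USER
 * instead, and _USER is then ignored by the code below.
 */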

struct cpu_user_fns {
	void (*cpu_clear_user_page)(void *p, unsigned long user);
	void (*cpu_copy_user_page)(void *to, const void *from,
				   unsigned long user);
};

#ifdef MULTI_USER
extern struct cpu_user_fns cpu_user;

#define __cpu_clear_user_page	cpu_user.cpu_clear_user_page
#define __cpu_copy_user_page	cpu_user.cpu_copy_user_page

#else

#define __cpu_clear_user_page	__glue(_USER,_clear_user_page)
#define __cpu_copy_user_page	__glue(_USER,_copy_user_page)

extern void __cpu_clear_user_page(void *p, unsigned long user);
extern void __cpu_copy_user_page(void *to, const void *from,
				 unsigned long user);
#endif

#define clear_user_page(addr,vaddr,pg)	 __cpu_clear_user_page(addr, vaddr)
#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)

#define clear_page(page)	memzero((void *)(page), PAGE_SIZE)
extern void copy_page(void *to, const void *from);
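
/*
 * Illustrative sketch: in the single-model case __glue() simply pastes
 * the selected _USER token onto the function name, so with _USER ==
 * v4wb the macros above resolve to v4wb_clear_user_page() and
 * v4wb_copy_user_page().  With MULTI_USER the same operations go
 * through the cpu_user function-pointer table instead, which processor
 * setup code is expected to fill in at boot.  Callers see the same
 * interface either way; example_clear_anon_page() below is a
 * hypothetical caller shown only for illustration.
 */
#if 0	/* example only, never compiled */
static void example_clear_anon_page(struct page *page, void *kaddr,
				    unsigned long vaddr)
{
	/* kaddr: kernel mapping of the page, vaddr: its user address */
	clear_user_page(kaddr, vaddr, page);
}
#endif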

#undef STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd[2]; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)      ((x).pte)
#define pmd_val(x)      ((x).pmd)
#define pgd_val(x)	((x).pgd[0])
#define pgprot_val(x)   ((x).pgprot)

#define __pte(x)        ((pte_t) { (x) } )
#define __pmd(x)        ((pmd_t) { (x) } )
#define __pgprot(x)     ((pgprot_t) { (x) } )

#else
/*
 * .. while these make it easier on the compiler
 */
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t[2];
typedef unsigned long pgprot_t;

#define pte_val(x)      (x)
#define pmd_val(x)      (x)
#define pgd_val(x)	((x)[0])
#define pgprot_val(x)   (x)

#define __pte(x)        (x)
#define __pmd(x)        (x)
#define __pgprot(x)     (x)

#endif /* STRICT_MM_TYPECHECKS */
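
/*
 * Illustrative sketch: the struct wrappers cost nothing at run time
 * but make pte_t, pmd_t, pgd_t and pgprot_t distinct types, so mixing
 * them up becomes a compile error rather than a silent bug.  Assuming
 * STRICT_MM_TYPECHECKS were defined, the hypothetical function below
 * would fail to compile on its last assignment.
 */
#if 0	/* example only, never compiled */
static void example_typecheck(pte_t pte, pgprot_t prot)
{
	unsigned long raw = pte_val(pte);	/* fine: explicit unwrap */
	pte_t copy = pte;			/* fine: same type */
	pgprot_t bad = pte;			/* error: pte_t != pgprot_t */
}
#endif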

/* the upper-most page table pointer */
extern pmd_t *top_pmd;

#endif /* CONFIG_MMU */

#include <asm/memory.h>

#endif /* !__ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

/*
 * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
 */
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define ARCH_SLAB_MINALIGN 8
#endif
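
/*
 * Illustrative note: the EABI requires 64-bit types such as long long
 * to be 8-byte aligned, so slab objects containing them must not be
 * handed out at 4-byte-aligned addresses; ARCH_SLAB_MINALIGN == 8
 * makes kmalloc() return suitably aligned memory.  The structure below
 * is a hypothetical example of such an object.
 */
#if 0	/* example only, never compiled */
struct example_eabi_aligned {
	unsigned long long	counter;	/* needs 8-byte alignment */
	unsigned long		flags;
};
#endif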

#include <asm-generic/page.h>

#endif /* __KERNEL__ */

#endif