/*
 *  linux/include/asm-arm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include "pgtable-nommu.h"

#else

#include <asm/memory.h>
#include <asm/arch/vmalloc.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif

/*
 * Hardware-wise, we have a two level page table structure, where the first
 * level has 4096 entries, and the second level has 256 entries.  Each entry
 * is one 32-bit word.  Most of the bits in the second level entry are used
 * by hardware, and there aren't any "accessed" and "dirty" bits.
 *
 * Linux on the other hand has a three level page table structure, which can
 * be wrapped to fit a two level page table structure easily - using the PGD
 * and PTE only.  However, Linux also expects one "PTE" table per page, and
 * at least a "dirty" bit.
 *
 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (iow, two
 * hardware pointers to the second level.)  The second level contains two
 * hardware PTE tables arranged contiguously, followed by Linux versions
 * which contain the state information Linux needs.  We, therefore, end up
 * with 512 entries in the "PTE" level.
 *
 * This leads to the page tables having the following layout:
 *
 *    pgd             pte
 * |        |
 * +--------+ +0
 * |        |-----> +------------+ +0
 * +- - - - + +4    |  h/w pt 0  |
 * |        |-----> +------------+ +1024
 * +--------+ +8    |  h/w pt 1  |
 * |        |       +------------+ +2048
 * +- - - - +       | Linux pt 0 |
 * |        |       +------------+ +3072
 * +--------+       | Linux pt 1 |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
 * PTE_xxx for definitions of bits appearing in the "h/w pt".
 *
 * PMD_xxx definitions refer to bits in the first level page table.
 *
 * The "dirty" bit is emulated by only granting hardware write permission
 * if the page is marked "writable" and "dirty" in the Linux PTE.  This
 * means that a write to a clean page will cause a permission fault, and
 * the Linux MM layer will mark the page dirty via handle_pte_fault().
 * For the hardware to notice the permission change, the TLB entry must
 * be flushed, and ptep_establish() does that for us.
 *
 * The "accessed" or "young" bit is emulated by a similar method; we only
 * allow accesses to the page if the "young" bit is set.  Accesses to the
 * page will cause a fault, and handle_pte_fault() will set the young bit
 * for us as long as the page is marked present in the corresponding Linux
 * PTE entry.  Again, ptep_establish() will ensure that the TLB is up to
 * date.
 *
 * However, when the "young" bit is cleared, we deny access to the page
 * by clearing the hardware PTE.  Currently Linux does not flush the TLB
 * for us in this case, which means the TLB will retain the translation
 * until either the TLB entry is evicted under pressure, or a context
 * switch which changes the user space mapping occurs.
 */
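/*
 * Illustrative sketch, not part of this header: the emulation described
 * above boils down to two predicates applied when a Linux PTE is turned
 * into a hardware PTE (the real translation lives in the processor-
 * specific cpu_set_pte implementations, not here):
 *
 *	hw write permitted  <=  L_PTE_WRITE set   && L_PTE_DIRTY set
 *	hw entry valid      <=  L_PTE_PRESENT set && L_PTE_YOUNG set
 *
 * So a present, young, writable but clean page is mapped read-only (the
 * first write faults and sets DIRTY), and a present but old page is left
 * unmapped in hardware (the first access faults and sets YOUNG).
 */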
#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		2048

/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT		21
#define PGDIR_SHIFT		21

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
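/*
 * Worked example, for illustration only: with PGDIR_SHIFT == 21 and
 * 4kB pages, a virtual address such as 0x40123456 decomposes as
 *
 *	pgd index	0x40123456 >> 21	 = 0x200 (entry 512 of 2048)
 *	pte index	(0x40123456 >> 12) & 511 = 0x123 (entry 291 of 512)
 *	page offset	0x40123456 & 0xfff	 = 0x456
 *
 * i.e. each of the 2048 pgd entries covers PGDIR_SIZE == 2MB, made up
 * of 512 pages of 4kB each.
 */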
/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE

#define FIRST_USER_PGD_NR	1
#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)

/*
 * ARMv6 supersection address mask and size definitions.
 */
#define SUPERSECTION_SHIFT	24
#define SUPERSECTION_SIZE	(1UL << SUPERSECTION_SHIFT)
#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE-1))

/*
 * "Linux" PTE definitions.
 *
 * We keep two sets of PTEs - the hardware and the linux version.
 * This allows greater flexibility in the way we map the Linux bits
 * onto the hardware tables, and allows us to have YOUNG and DIRTY
 * bits.
 *
 * The PTE table pointer refers to the hardware entries; each "Linux"
 * entry is stored 2048 bytes above its hardware counterpart (see the
 * layout above).
 */
#define L_PTE_PRESENT		(1 << 0)
#define L_PTE_FILE		(1 << 1)	/* only when !PRESENT */
#define L_PTE_YOUNG		(1 << 1)
#define L_PTE_BUFFERABLE	(1 << 2)	/* matches PTE */
#define L_PTE_CACHEABLE		(1 << 3)	/* matches PTE */
#define L_PTE_USER		(1 << 4)
#define L_PTE_WRITE		(1 << 5)
#define L_PTE_EXEC		(1 << 6)
#define L_PTE_DIRTY		(1 << 7)
#define L_PTE_COHERENT		(1 << 9)	/* I/O coherent (xsc3) */
#define L_PTE_SHARED		(1 << 10)	/* shared between CPUs (v6) */
#define L_PTE_ASID		(1 << 11)	/* non-global (use ASID, v6) */

#ifndef __ASSEMBLY__

/*
 * The following macros handle the cache and bufferable bits...
 */
#define _L_PTE_DEFAULT	(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_CACHEABLE | L_PTE_BUFFERABLE)
#define _L_PTE_READ	(L_PTE_USER | L_PTE_EXEC)

extern pgprot_t		pgprot_kernel;

#define PAGE_NONE	__pgprot(_L_PTE_DEFAULT)
#define PAGE_COPY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_SHARED	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
#define PAGE_READONLY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_KERNEL	pgprot_kernel

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) we cannot do execute protection
 *  2) if we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte_at((mm),(addr),(ptep), __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr)	(pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map(dir,addr)	(pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map_nested(dir,addr)	(pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

#define set_pte(ptep, pte)	cpu_set_pte(ptep,pte)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
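/*
 * Illustrative walk, for exposition only (assumes the tables covering
 * 'addr' are already populated):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);	// one of 2048 8-byte entries
 *	pmd_t *pmd = pmd_offset(pgd, addr);	// folded: pmd == (pmd_t *)pgd
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * pte then points at the "Linux" version of the entry; the hardware
 * version, 2048 bytes earlier in the same page, is kept in sync by the
 * cpu_set_pte implementations behind set_pte() below.
 */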
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_read(pte)		(pte_val(pte) & L_PTE_USER)
#define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
#define pte_exec(pte)		(pte_val(pte) & L_PTE_EXEC)
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)

/*
 * The following only work if pte_present() is not true.
 */
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 2)
#define pgoff_to_pte(x)		__pte(((x) << 2) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	30

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

/*PTE_BIT_FUNC(rdprotect, &= ~L_PTE_USER);*/
/*PTE_BIT_FUNC(mkread,    |= L_PTE_USER);*/
PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE);
PTE_BIT_FUNC(exprotect, &= ~L_PTE_EXEC);
PTE_BIT_FUNC(mkexec,    |= L_PTE_EXEC);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot)	__pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & 2)

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		pmdpd[0] = pmdps[0];	\
		pmdpd[1] = pmdps[1];	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		pmdp[0] = __pmd(0);	\
		pmdp[1] = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

static inline pte_t *pmd_page_kernel(pmd_t pmd)
{
	unsigned long ptr;

	/*
	 * The pmd may point at either of the two 1kB hardware tables;
	 * mask back to the start of the 4kB page, then step over both
	 * hardware tables (2048 bytes) to reach the "Linux" tables.
	 */
	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
	ptr += PTRS_PER_PTE * sizeof(void *);

	return __va(ptr);
}

#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))

/*
 * Permanent address of a page.  We never have highmem, so this is trivial.
 */
#define pages_to_mb(x)		((x) >> (20 - PAGE_SHIFT))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)		do { } while (0)
#define set_pgd(pgd,pgdp)	do { } while (0)

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))

/* Find an entry in the third-level page table.. */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.
 *
 * We support up to 32GB of swap on 4k machines
 */
#define __swp_type(x)		(((x).val >> 2) & 0x7f)
#define __swp_offset(x)		((x).val >> 9)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
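/*
 * Layout of a swap pte, for illustration only (derived from the shifts
 * and masks above):
 *
 *	bits  0-1	zero (the entry is neither present nor a file pte)
 *	bits  2-8	swap type	(7 bits, up to 128 swap areas)
 *	bits  9-31	swap offset	(23 bits)
 *
 * 2^23 page-sized slots * 4kB = the 32GB per swap area noted above.
 * File ptes instead set bit 1 (L_PTE_FILE) and keep a 30-bit page
 * offset in bits 2-31, hence PTE_FILE_MAX_BITS == 30.
 */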
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap `size' bytes of physical memory, starting at page frame `pfn',
 * into the user virtual address range beginning at `from', using page
 * protection `prot'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
		remap_pfn_range(vma, from, pfn, size, prot)

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */