
Lines in asm-x86 matching refs:address

278 #define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
283 pte_update_defer((vma)->vm_mm, (address), (ptep)); \
284 flush_tlb_page(vma, address); \
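
The matched fragments above come from the ptep_set_access_flags() macro: it writes the updated PTE, tells the paravirt layer about the deferred update, and flushes the single TLB entry for that address. Below is a minimal sketch of how fault-handling code typically drives it; the helper name mark_pte_dirty_young() is invented for illustration, pte_mkyoung()/pte_mkdirty() are ordinary kernel helpers not shown in the matches, and PTE locking is omitted.

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Illustrative helper, not part of the header being browsed. */
static void mark_pte_dirty_young(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep)
{
	pte_t entry = pte_mkdirty(pte_mkyoung(*ptep));

	/* Writes the PTE and flushes the TLB only if it actually changed. */
	ptep_set_access_flags(vma, address, ptep, entry, 1);
}
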
301 #define ptep_clear_flush_young(vma, address, ptep) \
304 __young = ptep_test_and_clear_young((vma), (address), (ptep)); \
306 flush_tlb_page(vma, address); \
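
The ptep_clear_flush_young() fragments above show the pattern: test-and-clear the accessed bit, then flush the TLB entry for that address only when the bit had been set. A hedged sketch of the obvious use, roughly how reclaim-style code asks whether a mapping was recently referenced (the helper name is made up for illustration):

#include <linux/mm.h>
#include <asm/pgtable.h>

static int pte_recently_referenced(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	/* Non-zero iff the PTE was accessed since the bit was last cleared;
	 * the stale TLB entry is flushed as a side effect. */
	return ptep_clear_flush_young(vma, address, ptep);
}
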
324 * Full address destruction in progress; paravirt does not
393 * control the given virtual address
395 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
402 #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
408 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
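
pgd_index() extracts bits PGDIR_SHIFT and above of the virtual address, pgd_offset() turns that index into a pointer within mm->pgd, and pgd_offset_k() simply runs pgd_offset() against init_mm for kernel addresses. A small sketch under those definitions (function names are illustrative):

#include <linux/mm.h>
#include <asm/pgtable.h>

static pgd_t *user_pgd_entry(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);		/* mm->pgd + pgd_index(address) */
}

static pgd_t *kernel_pgd_entry(unsigned long address)
{
	return pgd_offset_k(address);		/* pgd_offset(&init_mm, address) */
}
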
414 * control the given virtual address
416 #define pmd_index(address) \
417 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
423 * control the given virtual address
425 #define pte_index(address) \
426 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
427 #define pte_offset_kernel(dir, address) \
428 ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
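
pmd_index() and pte_index() do the same bit extraction one and two levels further down, and pte_offset_kernel() adds pte_index() to the virtual address of the PTE page. Put together, a software walk of a kernel-mapped address looks roughly like the sketch below; the function name is illustrative, error handling is minimal, and the pud level comes from the generic 4-level walk rather than the matches above.

#include <linux/mm.h>
#include <asm/pgtable.h>

static pte_t *walk_kernel_pte(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	/* pmd_page_vaddr(*pmd) + pte_index(address), per the macro above. */
	return pte_offset_kernel(pmd, address);
}
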
437 * the virtual address 'address'. NULL means no pagetable entry present.
441 extern pte_t *lookup_address(unsigned long address);
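
lookup_address() walks the kernel page tables for a virtual address and returns a pointer to its PTE, or NULL when no pagetable entry is present, as the comment above says. A hedged example of the obvious use (helper name invented for illustration):

#include <linux/mm.h>
#include <asm/pgtable.h>

static int kernel_addr_mapped(unsigned long address)
{
	pte_t *pte = lookup_address(address);

	return pte != NULL && pte_present(*pte);
}
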
456 #define pte_offset_map(dir, address) \
457 ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
458 #define pte_offset_map_nested(dir, address) \
459 ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
463 #define pte_offset_map(dir, address) \
464 ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
465 #define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
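
The two branches above are the CONFIG_HIGHPTE and non-highmem definitions of pte_offset_map()/pte_offset_map_nested(): with highmem PTE pages the page is temporarily mapped through kmap_atomic_pte() (slots KM_PTE0/KM_PTE1), otherwise it is plain pointer arithmetic on page_address(). Either way the caller must pair the map with pte_unmap()/pte_unmap_nested(); a small sketch follows (illustrative helper, PTE lock omitted for brevity):

#include <linux/mm.h>
#include <asm/pgtable.h>

static int pmd_maps_address(pmd_t *pmd, unsigned long address)
{
	pte_t *pte = pte_offset_map(pmd, address);
	int present = pte_present(*pte);

	pte_unmap(pte);		/* drops the kmap_atomic_pte() mapping, if any */
	return present;
}
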
481 #define update_mmu_cache(vma,address,pte) do { } while (0)
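
update_mmu_cache() is a no-op on x86 because the hardware page-table walker needs no software-managed refill after a PTE is installed; generic mm code still calls it unconditionally, roughly as in this sketch (illustrative helper, set_pte_at() is the usual generic setter, locking omitted):

#include <linux/mm.h>
#include <asm/pgtable.h>

static void install_pte(struct vm_area_struct *vma, unsigned long address,
			pte_t *ptep, pte_t entry)
{
	set_pte_at(vma->vm_mm, address, ptep, entry);
	update_mmu_cache(vma, address, entry);	/* expands to do { } while (0) */
}
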