
Lines Matching refs:address

59 	struct mm_struct * vm_mm;	/* The address space we belong to. */
60 	unsigned long vm_start;		/* Our start address within vm_mm. */
61 	unsigned long vm_end;		/* The first byte after our end address within vm_mm. */
64 	/* linked list of VM areas per task, sorted by address */
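
Those three fields are all an address lookup needs. A minimal sketch, assuming the classic 2.6-era layout where mm->mmap heads the sorted, singly linked VMA list (vm_next being the link referred to at line 64): walk until a VMA covers the address.

        static struct vm_area_struct *vma_lookup_linear(struct mm_struct *mm,
                                                        unsigned long address)
        {
                struct vm_area_struct *vma;

                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        if (address < vma->vm_start)
                                break;          /* list is sorted: nothing covers it */
                        if (address < vma->vm_end)
                                return vma;     /* vm_start <= address < vm_end */
                }
                return NULL;
        }

The kernel's own find_vma() layers a red-black tree and a per-mm mmap_cache on top of the same interval test.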
73 * For areas with an address space and backing store,
199 struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
200 int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
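
The nopage hook at line 199 is the 2.6-era demand-paging entry point. A hedged sketch of a driver-side handler; my_dev_buf_page() is a hypothetical helper mapping a byte offset in the driver's buffer to its struct page, while NOPAGE_SIGBUS, VM_FAULT_MINOR, and get_page() are the era's real interfaces.

        static struct page *my_vma_nopage(struct vm_area_struct *area,
                                          unsigned long address, int *type)
        {
                unsigned long offset;
                struct page *page;

                if (address >= area->vm_end)
                        return NOPAGE_SIGBUS;           /* fault outside the VMA */

                offset = (address - area->vm_start)
                         + (area->vm_pgoff << PAGE_SHIFT);
                page = my_dev_buf_page(offset);         /* hypothetical lookup */
                if (!page)
                        return NOPAGE_SIGBUS;

                get_page(page);         /* the fault path expects a reference */
                if (type)
                        *type = VM_FAULT_MINOR;
                return page;
        }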
257 * On machines where all RAM is mapped into kernel address space,
258 * we can simply calculate the virtual address. On machines with
260 * dynamically, so we need a place to store that address.
267 void *virtual; /* Kernel virtual address (NULL if
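
In practice that stored address is consumed via kmap()/kunmap() from linux/highmem.h rather than read directly. A short sketch of the highmem-safe pattern; without highmem, kmap() reduces to page_address().

        static void zero_page_contents(struct page *page)
        {
                void *ptr = kmap(page); /* maps the page if it has no
                                           permanent kernel virtual address */
                memset(ptr, 0, PAGE_SIZE);
                kunmap(page);
        }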
370 * page's address_space. Usually, this is the address of a circular
377 * Instead of keeping dirty/clean pages in per address-space lists, we instead
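
Since page->mapping doubles as an anon_vma pointer for anonymous pages (tagged in its low bit in this era), code recovering the address_space should go through page_mapping(). A minimal sketch:

        static struct inode *page_owner_inode(struct page *page)
        {
                struct address_space *mapping = page_mapping(page);

                /* NULL for anonymous or unowned pages */
                return mapping ? mapping->host : NULL;
        }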
540 #define set_page_address(page, address) \
542 (page)->virtual = (address); \
555 #define set_page_address(page, address) do { } while(0)
566 * refers to user virtual address space into which the page is mapped.
649 unsigned long address, int *type);
712 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
742 unsigned long address, int write_access);
745 struct vm_area_struct *vma, unsigned long address,
748 return __handle_mm_fault(mm, vma, address, write_access) &
753 struct vm_area_struct *vma, unsigned long address,
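
A hedged sketch of how callers drive the inline wrapper at line 744: fault one user page in, mapping this era's VM_FAULT_* return codes to errnos much as get_user_pages() does. The caller is assumed to hold mm->mmap_sem for read.

        static int fault_in_one(struct mm_struct *mm, struct vm_area_struct *vma,
                                unsigned long address, int write_access)
        {
                switch (handle_mm_fault(mm, vma, address & PAGE_MASK, write_access)) {
                case VM_FAULT_MINOR:
                case VM_FAULT_MAJOR:
                        return 0;               /* page is now mapped */
                case VM_FAULT_SIGBUS:
                        return -EFAULT;
                default:
                        return -ENOMEM;         /* VM_FAULT_OOM */
                }
        }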
807 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
808 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
809 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
810 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
817 static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
819 return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
820 NULL: pud_offset(pgd, address);
823 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
825 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
826 NULL: pmd_offset(pud, address);
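
These two inlines (with the pte_alloc macros below) compose into the allocation walk that __handle_mm_fault performs before it touches a pte. A minimal sketch, with locking left to the caller as in the fault path:

        static pmd_t *walk_alloc_pmd(struct mm_struct *mm, unsigned long address)
        {
                pgd_t *pgd = pgd_offset(mm, address);   /* top level always exists */
                pud_t *pud;

                pud = pud_alloc(mm, pgd, address);      /* allocate pud if absent */
                if (!pud)
                        return NULL;
                return pmd_alloc(mm, pud, address);     /* NULL on allocation failure */
        }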
852 #define pte_offset_map_lock(mm, pmd, address, ptlp) \
855 pte_t *__pte = pte_offset_map(pmd, address); \
866 #define pte_alloc_map(mm, pmd, address) \
867 ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
868 NULL: pte_offset_map(pmd, address))
870 #define pte_alloc_map_lock(mm, pmd, address, ptlp) \
871 ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
872 NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
874 #define pte_alloc_kernel(pmd, address) \
875 ((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
876 NULL: pte_offset_kernel(pmd, address))
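
A usage sketch for pte_alloc_map_lock() at line 870: allocate (if needed) and map the pte for an address, test it under the per-table lock, then release both mapping and lock with pte_unmap_unlock(), the counterpart these macros pair with.

        static int pte_is_present(struct mm_struct *mm, pmd_t *pmd,
                                  unsigned long address)
        {
                spinlock_t *ptl;
                pte_t *pte;
                int ret;

                pte = pte_alloc_map_lock(mm, pmd, address, &ptl);
                if (!pte)
                        return -ENOMEM;
                ret = pte_present(*pte);
                pte_unmap_unlock(pte, ptl);
                return ret;
        }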
990 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
992 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
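
expand_stack() at line 990 is what architecture fault handlers call when a faulting address lands just below a VM_GROWSDOWN stack VMA. A sketch of the classic check, in the spirit of do_page_fault(); the caller holds mm->mmap_sem.

        static struct vm_area_struct *stack_vma_for(struct mm_struct *mm,
                                                    unsigned long address)
        {
                struct vm_area_struct *vma = find_vma(mm, address);

                if (!vma)
                        return NULL;
                if (vma->vm_start <= address)
                        return vma;             /* already covered */
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        return NULL;            /* not a stack VMA: bad access */
                return expand_stack(vma, address) ? NULL : vma;
        }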
1023 struct page *follow_page(struct vm_area_struct *, unsigned long address,
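
Finally, a hedged usage sketch for follow_page() at line 1023, assuming this era's third parameter is a FOLL_* flag word (FOLL_GET takes a reference on the returned page); the caller holds mm->mmap_sem and vma covers the address.

        static void touch_backing_page(struct vm_area_struct *vma,
                                       unsigned long address)
        {
                struct page *page = follow_page(vma, address, FOLL_GET);

                if (!page)
                        return;         /* not (yet) mapped */
                /* ... use the page ... */
                put_page(page);         /* drop the FOLL_GET reference */
        }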