
Lines Matching defs:vma

67 	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
76 * linkage of vma in the address_space->i_mmap_nonlinear list.
89 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
90 * list, after a COW of one of the file pages. A MAP_SHARED vma
92 * or brk vma (with NULL file) can only be in an anon_vma list.
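A minimal sketch of the rule in the comment above, assuming only the vm_file and anon_vma fields of this era's struct vm_area_struct; the helper names are illustrative, not part of this header:

#include <linux/mm.h>

/* File-backed VMAs sit in the file's i_mmap (or i_mmap_nonlinear) structures;
 * a VMA is on an anon_vma list only once it has, or can gain, anonymous pages
 * (private COW pages, stack, or brk). */
static inline int vma_in_i_mmap(struct vm_area_struct *vma)
{
	return vma->vm_file != NULL;
}

static inline int vma_on_anon_list(struct vm_area_struct *vma)
{
	return vma->anon_vma != NULL;
}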
111 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
122 struct vm_area_struct *vma;
159 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
166 #define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */
204 int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
206 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
207 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
209 int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
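These hooks are supplied by drivers and filesystems. A minimal sketch of a driver ->mmap() handler installing its own vm_operations_struct, using only the page_mkwrite hook shown above; the mydrv_* names are illustrative:

#include <linux/fs.h>
#include <linux/mm.h>

/* Called when a present, read-only page in this VMA is about to be made
 * writable; returning 0 lets the write proceed. */
static int mydrv_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	return 0;
}

static struct vm_operations_struct mydrv_vm_ops = {
	.page_mkwrite	= mydrv_page_mkwrite,
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &mydrv_vm_ops;	/* wire the hooks into this VMA */
	return 0;
}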
648 struct page *shmem_nopage(struct vm_area_struct *vma,
650 int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new);
651 struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
663 static inline int shmem_set_policy(struct vm_area_struct *vma,
669 static inline struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
676 extern int shmem_mmap(struct file *file, struct vm_area_struct *vma);
712 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
723 struct vm_area_struct *vma);
724 int zeromap_page_range(struct vm_area_struct *vma, unsigned long from,
737 extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
738 extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
741 extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma,
745 struct vm_area_struct *vma, unsigned long address,
748 return __handle_mm_fault(mm, vma, address, write_access) &
753 struct vm_area_struct *vma, unsigned long address,
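Arch page-fault handlers typically switch on the value returned by the handle_mm_fault() wrapper above. A minimal sketch of that pattern, assuming mm->mmap_sem is held for reading and the VMA was located with find_vma(); the function name is illustrative:

#include <linux/errno.h>
#include <linux/mm.h>

static int resolve_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long address, int write_access)
{
	switch (handle_mm_fault(mm, vma, address, write_access)) {
	case VM_FAULT_MINOR:		/* satisfied without blocking I/O */
	case VM_FAULT_MAJOR:		/* required I/O, e.g. a swap-in */
		return 0;
	case VM_FAULT_SIGBUS:
		return -EFAULT;
	case VM_FAULT_OOM:
	default:
		return -ENOMEM;
	}
}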
899 struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
902 #define vma_prio_tree_foreach(vma, iter, root, begin, end) \
903 for (prio_tree_iter_init(iter, root, begin, end), vma = NULL; \
904 (vma = vma_prio_tree_next(vma, iter)); )
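A minimal sketch of driving this iterator: walk every shared, file-backed VMA whose page offsets overlap [begin, end] in a file's i_mmap prio tree, assuming the caller holds mapping->i_mmap_lock; the function name is illustrative:

#include <linux/fs.h>
#include <linux/mm.h>

static void walk_file_mappers(struct address_space *mapping,
			      pgoff_t begin, pgoff_t end)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, begin, end) {
		/* vma maps some part of [begin, end] of this file */
	}
}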
906 static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
909 vma->shared.vm_set.parent = NULL;
910 list_add_tail(&vma->shared.vm_set.list, list);
915 extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
990 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
992 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
995 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
1000 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
1004 struct vm_area_struct * vma = find_vma(mm,start_addr);
1006 if (vma && end_addr <= vma->vm_start)
1007 vma = NULL;
1008 return vma;
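A minimal sketch of the usual use of find_vma_intersection(): testing whether any part of [addr, addr + len) is already mapped, assuming mm->mmap_sem is held for reading; the helper name is illustrative:

#include <linux/mm.h>

static int range_is_mapped(struct mm_struct *mm,
			   unsigned long addr, unsigned long len)
{
	return find_vma_intersection(mm, addr, addr + len) != NULL;
}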
1011 static inline unsigned long vma_pages(struct vm_area_struct *vma)
1013 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
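A minimal sketch using vma_pages(): total the pages spanned by every VMA on an mm's vm_next-linked list, assuming mm->mmap_sem is held for reading; the helper name is illustrative:

#include <linux/mm.h>

static unsigned long count_mapped_pages(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long total = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		total += vma_pages(vma);
	return total;
}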
1074 const char *arch_vma_name(struct vm_area_struct *vma);