
Lines Matching defs:page

37 #include <asm/page.h>
41 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
55 * space that has a special rule for the page-fault handlers (ie a shared
148 #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
163 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
186 * low four bits) to a page protection mask..
194 * to the functions called when a no-page or a wp-page exception occurs.
199 struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
202 /* notification that a previously read-only page is about to become
204 int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
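
These two hooks are how a mapping supplies pages at fault time: ->nopage returns the struct page backing a faulting address, and ->page_mkwrite is notified before a read-only page becomes writable. A minimal ->nopage handler for this ~2.6-era interface might look like the sketch below (illustrative only: my_nopage is a made-up name, and NOPAGE_SIGBUS / VM_FAULT_MINOR are companion definitions from elsewhere in this header tree, next to the NOPAGE_OOM matched further down):

    #include <linux/mm.h>

    /* Sketch: resolve a fault to the struct page behind the mapped range. */
    static struct page *my_nopage(struct vm_area_struct *area,
                                  unsigned long address, int *type)
    {
            unsigned long pfn = area->vm_pgoff +
                                ((address - area->vm_start) >> PAGE_SHIFT);
            struct page *page;

            if (!pfn_valid(pfn))
                    return NOPAGE_SIGBUS;   /* no backing page: deliver SIGBUS */

            page = pfn_to_page(pfn);
            get_page(page);                 /* the fault path consumes one reference */
            if (type)
                    *type = VM_FAULT_MINOR;
            return page;
    }
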
218 * Each physical page in the system has a struct page associated with
219 * it to keep track of whatever it is we are using the page for at the
221 * a page.
223 struct page {
228 * to show when page is mapped
242 * If page mapped as anonymous
264 * WANT_PAGE_VIRTUAL in asm/page.h
272 #define page_private(page) ((page)->private)
273 #define set_page_private(page, v) ((page)->private = (v))
276 * FIXME: take this include out, include page-flags.h in
279 #include <linux/page-flags.h>
282 * Methods to modify the page usage count.
284 * What counts for a page usage:
285 * - cache mapping (page->mapping)
286 * - private data (page->private)
287 * - page mapped in a task's page tables, each mapping
290 * Also, many kernel routines increase the page count before a critical
291 * routine so they can be sure the page doesn't go away from under them.
295 * Drop a ref, return true if the logical refcount fell to zero (the page has
298 static inline int put_page_testzero(struct page *page)
300 BUG_ON(atomic_read(&page->_count) == 0);
301 return atomic_dec_and_test(&page->_count);
305 * Try to grab a ref unless the page has a refcount of zero, return false if
308 static inline int get_page_unless_zero(struct page *page)
310 return atomic_inc_not_zero(&page->_count);
313 extern void FASTCALL(__page_cache_release(struct page *));
315 static inline int page_count(struct page *page)
317 if (unlikely(PageCompound(page)))
318 page = (struct page *)page_private(page);
319 return atomic_read(&page->_count);
322 static inline void get_page(struct page *page)
324 if (unlikely(PageCompound(page)))
325 page = (struct page *)page_private(page);
326 atomic_inc(&page->_count);
330 * Setup the page count before being freed into the page allocator for
333 static inline void init_page_count(struct page *page)
335 atomic_set(&page->_count, 1);
338 void put_page(struct page *page);
341 void split_page(struct page *page, unsigned int order);
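
Taken together, get_page()/put_page() bracket any window in which code uses a page outside the protection of a lock; note from the definitions above that the helpers redirect compound pages to their head page via page_private(). A minimal sketch of the pinning pattern (illustrative, not from this header):

    #include <linux/mm.h>

    /* Sketch: hold a reference so the page cannot be freed while we touch it. */
    static void work_on_page(struct page *page)
    {
            get_page(page);         /* bumps page->_count (head page if compound) */
            /* ... safely read or modify the page here ... */
            put_page(page);         /* drops the reference; frees the page at zero */
    }
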
344 * Multiple processes may "see" the same page. E.g. for untouched
345 * mappings of /dev/null, all processes see the same page full of
349 * For the non-reserved pages, page_count(page) denotes a reference count.
350 * page_count() == 0 means the page is free. page->lru is then used for
352 * page_count() == 1 means the page is used for exactly one purpose
353 * (e.g. a private data page of one process).
355 * A page may be used for kmalloc() or anyone else who does a
358 * management of this page is the responsibility of the one who uses
365 * A page may belong to an inode's memory mapping. In this case,
366 * page->mapping is the pointer to the inode, and page->index is the
367 * file offset of the page, in units of PAGE_CACHE_SIZE.
369 * A page contains an opaque `private' member, which belongs to the
370 * page's address_space. Usually, this is the address of a circular
371 * list of the page's disk buffers.
375 * the page cache itself.
380 * There is also a per-mapping radix tree mapping index to the page
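
For a pagecache page, then, ->mapping names the owning address_space, ->index gives the offset in PAGE_CACHE_SIZE units, and ->private typically points at the page's buffer_heads. A hedged sketch using the page_mapping()/page_index() accessors defined later in this header (describe_pagecache_page is an illustrative name):

    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>      /* PAGE_CACHE_SHIFT */

    /* Sketch: report where a pagecache page sits inside the file that owns it. */
    static void describe_pagecache_page(struct page *page)
    {
            struct address_space *mapping = page_mapping(page); /* NULL for anon pages */
            pgoff_t index = page_index(page);   /* swap offset if the page is in swapcache */

            if (mapping)
                    printk(KERN_DEBUG "inode %p, byte offset %llu\n", mapping->host,
                           (unsigned long long)index << PAGE_CACHE_SHIFT);
    }
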
398 * page->flags layout:
400 * There are three possibilities for how page->flags get
425 /* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
431 * We are going to use the flags for the page to node mapping if its in
449 /* NODE:ZONE or SECTION:ZONE is used to lookup the zone from a page. */
466 static inline unsigned long page_zonenum(struct page *page)
468 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
474 static inline int page_zone_id(struct page *page)
476 return (page->flags >> ZONETABLE_PGSHIFT) & ZONETABLE_MASK;
478 static inline struct zone *page_zone(struct page *page)
480 return zone_table[page_zone_id(page)];
483 static inline unsigned long page_to_nid(struct page *page)
486 return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
488 return page_zone(page)->zone_pgdat->node_id;
490 static inline unsigned long page_to_section(struct page *page)
492 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
495 static inline void set_page_zone(struct page *page, unsigned long zone)
497 page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
498 page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
500 static inline void set_page_node(struct page *page, unsigned long node)
502 page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
503 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
505 static inline void set_page_section(struct page *page, unsigned long section)
507 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
508 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
511 static inline void set_page_links(struct page *page, unsigned long zone,
514 set_page_zone(page, zone);
515 set_page_node(page, node);
516 set_page_section(page, pfn_to_section_nr(pfn));
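
So a page's zone, node and (with sparsemem) section live in the high bits of page->flags: set_page_links() packs them in at init time, and the page_zonenum()/page_to_nid()/page_zone() helpers above just shift and mask them back out (page_zone() indexing zone_table[] by the combined id). A small illustrative sketch of reading them back (show_page_placement is a made-up name):

    #include <linux/kernel.h>
    #include <linux/mm.h>

    /* Sketch: decode the placement information packed into page->flags. */
    static void show_page_placement(struct page *page)
    {
            printk(KERN_DEBUG "page %p: zone %lu on node %lu (struct zone %p)\n",
                   page, page_zonenum(page), page_to_nid(page), page_zone(page));
    }
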
526 extern struct page *mem_map;
529 static __always_inline void *lowmem_page_address(struct page *page)
531 return __va(page_to_pfn(page) << PAGE_SHIFT);
539 #define page_address(page) ((page)->virtual)
540 #define set_page_address(page, address) \
542 (page)->virtual = (address); \
548 void *page_address(struct page *page);
549 void set_page_address(struct page *page, void *virtual);
554 #define page_address(page) lowmem_page_address(page)
555 #define set_page_address(page, address) do { } while(0)
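
Whichever variant is configured, a ->virtual field (WANT_PAGE_VIRTUAL), a hashed lookup, or plain lowmem arithmetic, page_address() yields the kernel virtual address of a page that has one. A hedged sketch of the common pattern (grab_zeroed_page is an illustrative name; highmem pages would need kmap() instead):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    /* Sketch: allocate a lowmem page and zero it through its kernel virtual address. */
    static struct page *grab_zeroed_page(void)
    {
            struct page *page = alloc_page(GFP_KERNEL);     /* lowmem, so page_address() works */

            if (!page)
                    return NULL;
            memset(page_address(page), 0, PAGE_SIZE);
            return page;                                    /* caller frees with __free_page() */
    }
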
560 * On an anonymous page mapped into a user virtual memory area,
561 * page->mapping points to its anon_vma, not to a struct address_space;
565 * address_space which maps the page from disk; whereas "page_mapped"
566 * refers to user virtual address space into which the page is mapped.
571 static inline struct address_space *page_mapping(struct page *page)
573 struct address_space *mapping = page->mapping;
575 if (unlikely(PageSwapCache(page)))
582 static inline int PageAnon(struct page *page)
584 return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
588 * Return the pagecache index of the passed page. Regular pagecache pages
591 static inline pgoff_t page_index(struct page *page)
593 if (unlikely(PageSwapCache(page)))
594 return page_private(page);
595 return page->index;
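
Because an anonymous page keeps its anon_vma in ->mapping with PAGE_MAPPING_ANON set, page_mapping() deliberately returns NULL for it, and swapcache pages are redirected to the swapper address space; page_index() likewise returns the swap offset stored in page_private() when PageSwapCache. A hedged classification sketch (classify_page is a made-up name):

    #include <linux/mm.h>

    /* Sketch: classify a page by what its ->mapping field encodes. */
    static const char *classify_page(struct page *page)
    {
            if (PageAnon(page))
                    return "anonymous (mapping is an anon_vma)";
            if (PageSwapCache(page))
                    return "swap cache (page_private() holds the swap entry)";
            if (page_mapping(page))
                    return "file backed (mapping is an address_space)";
            return "not owned by any mapping";
    }
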
599 * The atomic page->_mapcount, like _count, starts from -1:
603 static inline void reset_page_mapcount(struct page *page)
605 atomic_set(&(page)->_mapcount, -1);
608 static inline int page_mapcount(struct page *page)
610 return atomic_read(&(page)->_mapcount) + 1;
614 * Return true if this page is mapped into pagetables.
616 static inline int page_mapped(struct page *page)
618 return atomic_read(&(page)->_mapcount) >= 0;
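
Since _mapcount starts at -1, page_mapcount() reads as the number of pagetable entries pointing at the page and page_mapped() is just the test "_mapcount >= 0". A small illustrative sketch (report_sharing is a made-up name):

    #include <linux/kernel.h>
    #include <linux/mm.h>

    /* Sketch: _mapcount bookkeeping as seen through the helpers above. */
    static void report_sharing(struct page *page)
    {
            if (page_mapped(page))
                    printk(KERN_DEBUG "mapped into %d pagetable(s)\n",
                           page_mapcount(page));
            else
                    printk(KERN_DEBUG "not mapped into any process\n");
    }
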
625 #define NOPAGE_OOM ((struct page *) (-1))
648 struct page *shmem_nopage(struct vm_area_struct *vma,
703 struct vm_area_struct *nonlinear_vma; /* Check page->index if set */
704 struct address_space *check_mapping; /* Check page->mapping if set */
705 pgoff_t first_index; /* Lowest page->index to unmap */
706 pgoff_t last_index; /* Highest page->index to unmap */
711 struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t);
737 extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
764 void install_arg_page(struct vm_area_struct *, struct page *, unsigned long);
767 int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
770 int __set_page_dirty_buffers(struct page *page);
771 int __set_page_dirty_nobuffers(struct page *page);
773 struct page *page);
774 int FASTCALL(set_page_dirty(struct page *page));
775 int set_page_dirty_lock(struct page *page);
776 int clear_page_dirty_for_io(struct page *page);
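
The set_page_dirty*() helpers pair naturally with get_user_pages(), whose prototype's second line is matched above: pin the user pages, let the kernel or a device write into them, then mark them dirty and drop the references. A hedged sketch of that pattern (touch_user_buffer is a made-up name; error handling trimmed):

    #include <linux/mm.h>
    #include <linux/sched.h>

    /* Sketch: pin user pages for write access, then mark them dirty and release them. */
    static int touch_user_buffer(unsigned long uaddr, int nr_pages, struct page **pages)
    {
            int i, got;

            down_read(&current->mm->mmap_sem);
            got = get_user_pages(current, current->mm, uaddr, nr_pages,
                                 1 /* write */, 0 /* force */, pages, NULL);
            up_read(&current->mm->mmap_sem);
            if (got <= 0)
                    return got;

            /* ... data is written into the pinned pages here ... */

            for (i = 0; i < got; i++) {
                    set_page_dirty_lock(pages[i]);  /* variant for callers not holding the page lock */
                    put_page(pages[i]);             /* drop the get_user_pages() reference */
            }
            return got;
    }
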
832 * We tuck a spinlock to guard each pagetable page into its struct page,
833 * at page->private, with BUILD_BUG_ON to make sure that this will not
834 * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
835 * When freeing, reset page->mapping so free_pages_check won't complain.
837 #define __pte_lockptr(page) &((page)->ptl)
841 #define pte_lock_deinit(page) ((page)->mapping = NULL)
847 #define pte_lock_init(page) do {} while (0)
848 #define pte_lock_deinit(page) do {} while (0)
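
The consumer side of this split pagetable lock is pte_offset_map_lock()/pte_unmap_unlock() elsewhere in this header, which take the per-page ->ptl when CONFIG_SPLIT_PTLOCK_CPUS applies and fall back to mm->page_table_lock otherwise. A hedged sketch (pte_is_present is a made-up name; the pgd/pud walk to obtain pmd is assumed to be done already):

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    /* Sketch: examine one PTE under the appropriate pagetable lock. */
    static int pte_is_present(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
    {
            spinlock_t *ptl;
            pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
            int present = pte_present(*pte);

            pte_unmap_unlock(pte, ptl);
            return present;
    }
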
957 extern unsigned long page_unuse(struct page *);
963 extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *);
967 /* mm/page-writeback.c */
968 int write_one_page(struct page *page, int wait);
972 #define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
1017 struct page *vmalloc_to_page(void *addr);
1021 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
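
vm_insert_page(), together with vmalloc_to_page() just above, is the usual way for a driver to expose kernel pages through mmap() without writing a ->nopage handler. A hedged sketch of such an mmap method (my_mmap, my_buf and MY_BUF_PAGES are made-up names; my_buf is assumed to be a vmalloc()ed buffer):

    #include <linux/fs.h>
    #include <linux/mm.h>

    #define MY_BUF_PAGES 16                 /* illustrative buffer size */
    static char *my_buf;                    /* assumed: vmalloc(MY_BUF_PAGES * PAGE_SIZE) */

    /* Sketch: map a vmalloc()ed buffer into userspace one page at a time. */
    static int my_mmap(struct file *filp, struct vm_area_struct *vma)
    {
            unsigned long uaddr = vma->vm_start;
            unsigned long i, nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

            if (nr > MY_BUF_PAGES)
                    return -EINVAL;

            for (i = 0; i < nr; i++, uaddr += PAGE_SIZE) {
                    int err = vm_insert_page(vma, uaddr,
                                             vmalloc_to_page(my_buf + i * PAGE_SIZE));
                    if (err)
                            return err;
            }
            return 0;
    }
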
1023 struct page *follow_page(struct vm_area_struct *, unsigned long address,
1026 #define FOLL_TOUCH 0x02 /* mark page accessed */
1027 #define FOLL_GET 0x04 /* do get_page on page */
1041 kernel_map_pages(struct page *page, int numpages, int enable)
1043 if (!PageHighMem(page) && !enable)
1044 debug_check_no_locks_freed(page_address(page),