#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define	AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}

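/*
 * Example (illustrative sketch, not part of this header): a filesystem
 * that must not recurse into itself during page-cache allocations can
 * restrict the mapping's allocation mode while the inode is being set up,
 * before any pages have been added to the mapping.  The function name
 * below is hypothetical; mapping_set_gfp_mask() and GFP_NOFS are real.
 *
 *	static void example_setup_mapping(struct inode *inode)
 *	{
 *		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 *	}
 */
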
/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

#ifdef CONFIG_NUMA
extern struct page *page_cache_alloc(struct address_space *x);
extern struct page *page_cache_alloc_cold(struct address_space *x);
#else
static inline struct page *page_cache_alloc(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x), 0);
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
}
#endif

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
				unsigned long index);
extern struct page * find_lock_page(struct address_space *mapping,
				unsigned long index);
extern __deprecated_for_modules struct page * find_trylock_page(
			struct address_space *mapping, unsigned long index);
extern struct page * find_or_create_page(struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

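/*
 * Example (illustrative sketch): grab_cache_page() returns the page locked
 * and with an elevated reference count, so the caller must unlock_page()
 * and page_cache_release() it when done.  example_touch_page() below is
 * hypothetical, not part of this header.
 *
 *	static int example_touch_page(struct address_space *mapping,
 *				      unsigned long index)
 *	{
 *		struct page *page = grab_cache_page(mapping, index);
 *
 *		if (!page)
 *			return -ENOMEM;
 *		... fill or modify the locked page here ...
 *		unlock_page(page);
 *		page_cache_release(page);
 *		return 0;
 *	}
 */
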
extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				unsigned long index);
extern struct page * read_cache_page(struct address_space *mapping,
				unsigned long index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
					     unsigned long index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

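/*
 * Example (illustrative sketch): read_mapping_page() returns either an
 * ERR_PTR() on failure or a referenced page that may still be under I/O,
 * so a caller typically waits for the lock and checks PageUptodate()
 * before trusting the contents.  example_read_index() is hypothetical.
 *
 *	static struct page *example_read_index(struct address_space *mapping,
 *					       unsigned long index)
 *	{
 *		struct page *page = read_mapping_page(mapping, index, NULL);
 *
 *		if (IS_ERR(page))
 *			return page;
 *		wait_on_page_locked(page);
 *		if (!PageUptodate(page)) {
 *			page_cache_release(page);
 *			return ERR_PTR(-EIO);
 *		}
 *		return page;
 *	}
 */
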
int add_to_page_cache(struct page *page, struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

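/*
 * Worked example: with PAGE_CACHE_SHIFT == 12 (4KB pages), a page whose
 * ->index is 3 starts at byte offset 3 << 12 == 12288 within the file.
 */
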
static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));

static inline void lock_page(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page(page);
}

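/*
 * Example (illustrative sketch): lock_page() may sleep, and a page can be
 * removed from its mapping (e.g. by truncate) while it was unlocked, so a
 * caller that found the page earlier commonly re-checks page->mapping once
 * the lock is held and backs out if it raced.  example_lock_and_check() is
 * hypothetical.
 *
 *	static int example_lock_and_check(struct address_space *mapping,
 *					  struct page *page)
 *	{
 *		lock_page(page);
 *		if (page->mapping != mapping) {
 *			unlock_page(page);
 *			return -EAGAIN;
 *		}
 *		... page is locked and still attached to mapping ...
 *		unlock_page(page);
 *		return 0;
 *	}
 */
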
/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

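/*
 * Example (illustrative sketch): take a reference before waiting so the
 * page cannot be freed underneath the waiter, as the comment above
 * requires.  example_wait_for_page() is hypothetical.
 *
 *	static void example_wait_for_page(struct page *page)
 *	{
 *		page_cache_get(page);
 *		wait_on_page_locked(page);
 *		page_cache_release(page);
 *	}
 */
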
/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline void fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			__get_user(c, end);
	}
}
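
/*
 * Example (illustrative sketch): a write path can pre-fault the user
 * buffer before taking page locks, so that a later copy done under the
 * lock with a non-faulting usercopy is unlikely to fault and deadlock.
 * example_prepare_copy() below is hypothetical; fault_in_pages_readable()
 * is the helper declared above.
 *
 *	static void example_prepare_copy(const char __user *buf, int bytes)
 *	{
 *		fault_in_pages_readable(buf, bytes);
 *		... now lock the page-cache page and copy from buf with an
 *		    atomic (non-faulting) usercopy ...
 *	}
 */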

#endif /* _LINUX_PAGEMAP_H */