/* Home | History | Annotate | Download | only in asm-mips */
      1 /*
      2  * This file is subject to the terms and conditions of the GNU General Public
      3  * License.  See the file "COPYING" in the main directory of this archive
      4  * for more details.
      5  *
      6  * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
      7  * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
      8  */
      9 #ifndef _ASM_CACHEFLUSH_H
     10 #define _ASM_CACHEFLUSH_H
     11 
     12 /* Keep includes the same across arches.  */
     13 #include <linux/mm.h>
     14 #include <asm/cpu-features.h>
     15 
     16 /* Cache flushing:
     17  *
     18  *  - flush_cache_all() flushes entire cache
     19  *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
     21  *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
     22  *  - flush_cache_range(vma, start, end) flushes a range of pages
     23  *  - flush_icache_range(start, end) flush a range of instructions
     24  *  - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
     25  *
     26  * MIPS specific flush operations:
     27  *
     28  *  - flush_cache_sigtramp() flush signal trampoline
     29  *  - flush_icache_all() flush the entire instruction cache
     30  *  - flush_data_cache_page() flushes a page from the data cache
     31  */
/*
 * Cache maintenance entry points.  These are function pointers rather than
 * plain functions — presumably installed at boot to match the detected
 * CPU's cache implementation (TODO: confirm against the cache setup code).
 */
extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
/* No-op: the argument is evaluated for side effects only, then discarded. */
#define flush_cache_dup_mm(mm)	do { (void) (mm); } while (0)
extern void (*flush_cache_range)(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
extern void __flush_dcache_page(struct page *page);
     40 
     41 static inline void flush_dcache_page(struct page *page)
     42 {
     43 	if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
     44 		__flush_dcache_page(page);
     45 
     46 }
     47 
/* No-ops: MIPS needs no locking around dcache flushing of a mapping. */
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

/* Tell generic code that this arch supplies flush_anon_page() below. */
#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *, unsigned long);
     53 static inline void flush_anon_page(struct vm_area_struct *vma,
     54 	struct page *page, unsigned long vmaddr)
     55 {
     56 	if (cpu_has_dc_aliases && PageAnon(page))
     57 		__flush_anon_page(page, vmaddr);
     58 }
     59 
/*
 * Intentionally a no-op.  NOTE(review): presumably I-cache coherency for
 * newly mapped pages is handled by the other flush hooks in this file —
 * confirm against the MIPS cache management code.
 */
static inline void flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
}
     64 
/* Flush a range of instructions; the local_ variant presumably acts on
 * the calling CPU only (TODO: confirm against the implementation). */
extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);

/* Backend invoked by flush_cache_vmap() below. */
extern void (*__flush_cache_vmap)(void);
     69 
     70 static inline void flush_cache_vmap(unsigned long start, unsigned long end)
     71 {
     72 	if (cpu_has_dc_aliases)
     73 		__flush_cache_vmap();
     74 }
     75 
/* Backend invoked by flush_cache_vunmap() below. */
extern void (*__flush_cache_vunmap)(void);
     77 
     78 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
     79 {
     80 	if (cpu_has_dc_aliases)
     81 		__flush_cache_vunmap();
     82 }
     83 
/*
 * Copy data into/out of a page that may also be mapped in user space at
 * @vaddr, performing whatever cache maintenance that requires.
 */
extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

/* MIPS specific flush operations (see the comment block at the top). */
extern void (*flush_cache_sigtramp)(unsigned long addr);
extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void * addr);
extern void (*flush_data_cache_page)(unsigned long addr);
     96 
/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty			PG_arch_1

/* Test / set / clear the dcache-dirty bit in page->flags. */
#define Page_dcache_dirty(page)		\
	test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page)	\
	set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page)	\
	clear_bit(PG_dcache_dirty, &(page)->flags)
    109 
/* Run kernel code uncached, useful for cache probing functions. */
unsigned long run_uncached(void *func);

/*
 * NOTE(review): from the names, kmap_coherent() appears to map @page at a
 * kernel address that is cache-coherent with the user mapping at @addr,
 * and kunmap_coherent() undoes it — confirm against the mm code.
 */
extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
    115 
    116 #endif /* _ASM_CACHEFLUSH_H */
    117