xref: /linux/arch/mips/mm/cache.c (revision 2b8232ce512105e28453f301d1510de8363bccd1)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

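/*
 * All of the cache operations below are function pointers rather than
 * direct calls: the CPU family specific cache code selected at boot time
 * in cpu_cache_init() installs the implementations that match the cache
 * architecture the kernel is running on.
 */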
/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);

#ifdef CONFIG_DMA_NONCOHERENT

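/*
 * On platforms where DMA is not cache-coherent the DMA mapping code uses
 * these hooks to write back and/or invalidate buffers around transfers.
 * They are installed by the same CPU specific cache init code as the
 * operations above.
 */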
/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);
EXPORT_SYMBOL(_dma_cache_wback);
EXPORT_SYMBOL(_dma_cache_inv);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE, but
 * that seems to be a very atypical use ...
 */
asmlinkage int sys_cacheflush(unsigned long addr,
	unsigned long bytes, unsigned int cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}

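/*
 * Illustrative sketch, not part of the original file: user space normally
 * reaches sys_cacheflush() through a C library wrapper such as the one
 * glibc declares in <sys/cachectl.h>, e.g. after writing freshly
 * generated instructions into a buffer (code_buf / code_len are
 * hypothetical names):
 *
 *	#include <sys/cachectl.h>
 *
 *	if (cacheflush(code_buf, code_len, ICACHE) < 0)
 *		perror("cacheflush");
 *
 * Note that the implementation above flushes the instruction cache range
 * whatever cache flag the caller passes in.
 */
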
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the tlb (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

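/*
 * Flush an anonymous page that user space has mapped at vmaddr.  On
 * virtually indexed caches the kernel's own mapping of the page need not
 * hit the same cache lines as the user mapping, so when the two addresses
 * fall into different cache colours (pages_do_alias()) the flush is done
 * through a temporary kernel mapping that kmap_coherent() sets up at a
 * colour matching vmaddr.
 */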
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	if (pages_do_alias((unsigned long)page_address(page), vmaddr)) {
		void *kaddr;

		kaddr = kmap_coherent(page, vmaddr);
		flush_data_cache_page((unsigned long)kaddr);
		kunmap_coherent();
	}
}

EXPORT_SYMBOL(__flush_anon_page);

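/*
 * Called when a PTE is installed for a user address (update_mmu_cache()).
 * If a dcache flush for the page was deferred in __flush_dcache_page() it
 * is performed now, either because the new user mapping may alias the
 * kernel mapping of the page or because the mapping is executable and the
 * CPU's instruction cache does not fill from the dcache.
 */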
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		addr = (unsigned long) page_address(page);
		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);
		ClearPageDcacheDirty(page);
	}
}

static char cache_panic[] __initdata = "Yeee, unsupported cache architecture.";

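/*
 * Only the cache code for the CPU family the kernel is configured for is
 * normally built in.  The extern declarations below are marked __weak so
 * that references to init routines which were not built do not cause link
 * errors; the cpu_has_* tests ensure that only the routine matching the
 * cache architecture actually present is ever called.
 */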
void __init cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
		return;
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
		return;
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
		return;
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
		return;
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
		return;
	}

	panic(cache_panic);
}

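/*
 * Default policy for deciding whether a mapping of physical memory (for
 * instance through /dev/mem) must bypass the caches: O_SYNC mappings are
 * always uncached, and so is anything above the memory managed by the
 * kernel, which is typically memory-mapped I/O.  Platforms can override
 * this weak default.
 */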
int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_SYNC)
		return 1;

	return addr >= __pa(high_memory);
}