/*
 * arch/sh/mm/cache-sh7705.c
 *
 * Copyright (C) 1999, 2000  Niibe Yutaka
 * Copyright (C) 2004  Alex Song
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/threads.h>
#include <linux/uaccess.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The 32KB cache on the SH7705 suffers from the same synonym problem
 * as SH4 CPUs do.
 */
static inline void cache_wback_all(void)
{
	unsigned long ways, waysize, addrstart;

	ways = current_cpu_data.dcache.ways;
	waysize = current_cpu_data.dcache.sets;
	waysize <<= current_cpu_data.dcache.entry_shift;

	addrstart = CACHE_OC_ADDRESS_ARRAY;

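	/*
	 * Walk every line of each way through the memory-mapped operand
	 * cache address array: any entry that is both valid and dirty has
	 * its U (updated) and V (valid) bits cleared, which writes the
	 * line back and invalidates it.
	 */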
	do {
		unsigned long addr;

		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += current_cpu_data.dcache.linesz) {
			unsigned long data;
			int v = SH_CACHE_UPDATED | SH_CACHE_VALID;

			data = __raw_readl(addr);

			if ((data & v) == v)
				__raw_writel(data & ~v, addr);
		}

		addrstart += current_cpu_data.dcache.way_incr;
	} while (--ways);
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and from the a.out binary
 * format loading routine.
 */
static void sh7705_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;

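	/* addr1/addr2 carry the start and end of the virtual address range */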
	start = data->addr1;
	end = data->addr2;

	__flush_wback_region((void *)start, end - start);
}

/*
 * Write back and invalidate the D-cache of the page.
 */
static void __flush_dcache_page(unsigned long phys)
{
	unsigned long ways, waysize, addrstart;
	unsigned long flags;

	phys |= SH_CACHE_VALID;

	/*
	 * Here, phys is the physical address of the page. We check all the
	 * tags in the cache for those with the same page number as this page
	 * (by masking off the lowest 2 bits of the 19-bit tag; these bits are
	 * derived from the offset within the 4K page). Matching valid
	 * entries are invalidated.
	 *
	 * Since 2 bits of the cache index are derived from the virtual page
	 * number, knowing this would reduce the number of cache entries to be
	 * searched by a factor of 4. However, this function exists to deal
	 * with potential cache aliasing, therefore the optimisation is
	 * probably not possible.
	 */
	local_irq_save(flags);
	jump_to_uncached();

	ways = current_cpu_data.dcache.ways;
	waysize = current_cpu_data.dcache.sets;
	waysize <<= current_cpu_data.dcache.entry_shift;

	addrstart = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long addr;

		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += current_cpu_data.dcache.linesz) {
			unsigned long data;

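			/* Compare this entry's tag and valid bit against the target page */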
			data = __raw_readl(addr) & (0x1ffffc00 | SH_CACHE_VALID);
			if (data == phys) {
				data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED);
				__raw_writel(data, addr);
			}
		}

		addrstart += current_cpu_data.dcache.way_incr;
	} while (--ways);

	back_to_cached();
	local_irq_restore(flags);
}

/*
 * Write back and invalidate the D-cache of the page
 * (to avoid cache alias issues).
 */
static void sh7705_flush_dcache_page(void *arg)
{
	struct page *page = arg;
	struct address_space *mapping = page_mapping_file(page);

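	/*
	 * If the page is only in the page cache and not mapped into any
	 * user address space, defer the flush: clearing PG_dcache_clean
	 * marks the page as needing a flush when it is eventually mapped.
	 * Otherwise, flush the aliases out of the D-cache now.
	 */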
	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else
		__flush_dcache_page(__pa(page_address(page)));
}

static void sh7705_flush_cache_all(void *args)
{
	unsigned long flags;

	local_irq_save(flags);
	jump_to_uncached();

	cache_wback_all();
	back_to_cached();
	local_irq_restore(flags);
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDRESS: Virtual Address (U0 address)
 */
static void sh7705_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	unsigned long pfn = data->addr2;

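	/* addr2 carries the PFN; convert it to the physical page address */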
	__flush_dcache_page(pfn << PAGE_SHIFT);
}

/*
 * This is called when a page-cache page is about to be mapped into a
 * user process' address space.  It offers an opportunity for a
 * port to ensure d-cache/i-cache coherency if necessary.
 *
 * Not entirely sure why this is necessary on SH3 with 32K cache, but
 * without it we get occasional "Memory fault" errors when loading a
 * program.
 */
static void sh7705_flush_icache_page(void *page)
{
	__flush_purge_region(page_address(page), PAGE_SIZE);
}

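/*
 * Wire the SH7705-specific handlers into the generic local_flush_*
 * hooks.  Range, mm and dup_mm flushes all fall back to flushing the
 * entire cache.
 */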
void __init sh7705_cache_init(void)
{
	local_flush_icache_range	= sh7705_flush_icache_range;
	local_flush_dcache_page		= sh7705_flush_dcache_page;
	local_flush_cache_all		= sh7705_flush_cache_all;
	local_flush_cache_mm		= sh7705_flush_cache_all;
	local_flush_cache_dup_mm	= sh7705_flush_cache_all;
	local_flush_cache_range		= sh7705_flush_cache_all;
	local_flush_cache_page		= sh7705_flush_cache_page;
	local_flush_icache_page		= sh7705_flush_icache_page;
}