/*
 * arch/sh/mm/cache-sh2a.c
 *
 * Copyright (C) 2008 Yoshinori Sato
 *
 * Released under the terms of the GNU GPL v2.0.
 */

#include <linux/init.h>
#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in
 * its entirety.
 */
#define MAX_OCACHE_PAGES	32
#define MAX_ICACHE_PAGES	32

#ifdef CONFIG_CACHE_WRITEBACK
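/*
 * Write back a single operand cache line, selected by way number and by
 * the entry bits of the address. Reading the memory-mapped address
 * array returns the line's tag and state bits; if the tag matches the
 * target address, the U (updated) bit is cleared through the address
 * array so that a dirty line is written back to memory.
 */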
static void sh2a_flush_oc_line(unsigned long v, int way)
{
	unsigned long addr = (v & 0x000007f0) | (way << 11);
	unsigned long data;

	data = __raw_readl(CACHE_OC_ADDRESS_ARRAY | addr);
	if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
		data &= ~SH_CACHE_UPDATED;
		__raw_writel(data, CACHE_OC_ADDRESS_ARRAY | addr);
	}
}
#endif

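/*
 * Invalidate a single cache line through the given memory-mapped
 * address array (operand or instruction cache). The associative bit
 * makes the hardware check all ways for a matching entry and clear
 * its valid bit.
 */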
static void sh2a_invalidate_line(unsigned long cache_addr, unsigned long v)
{
	/* Set associative bit to hit all ways */
	unsigned long addr = (v & 0x000007f0) | SH_CACHE_ASSOC;
	__raw_writel((addr & CACHE_PHYSADDR_MASK), cache_addr | addr);
}

/*
 * Write back the dirty D-caches, but don't invalidate them.
 */
static void sh2a__flush_wback_region(void *start, int size)
{
#ifdef CONFIG_CACHE_WRITEBACK
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;
	int nr_ways;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	nr_ways = current_cpu_data.dcache.ways;

	local_irq_save(flags);
	jump_to_uncached();

	/* If there are too many pages then flush the entire cache */
	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
		begin = CACHE_OC_ADDRESS_ARRAY;
		end = begin + (nr_ways * current_cpu_data.dcache.way_size);

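		/*
		 * Walk every entry in every way of the address array
		 * and clear the U (updated) bit of each dirty line so
		 * it is written back.
		 */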
		for (v = begin; v < end; v += L1_CACHE_BYTES) {
			unsigned long data = __raw_readl(v);
			if (data & SH_CACHE_UPDATED)
				__raw_writel(data & ~SH_CACHE_UPDATED, v);
		}
	} else {
		int way;
		for (way = 0; way < nr_ways; way++) {
			for (v = begin; v < end; v += L1_CACHE_BYTES)
				sh2a_flush_oc_line(v, way);
		}
	}

	back_to_cached();
	local_irq_restore(flags);
#endif
}

/*
 * Write back the dirty D-caches and invalidate them.
 */
static void sh2a__flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	local_irq_save(flags);
	jump_to_uncached();

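	/*
	 * For each line in the range: on write-back configurations,
	 * write back any dirty copy first, then invalidate the line
	 * with an associative write to the address array.
	 */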
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
#ifdef CONFIG_CACHE_WRITEBACK
		int way;
		int nr_ways = current_cpu_data.dcache.ways;
		for (way = 0; way < nr_ways; way++)
			sh2a_flush_oc_line(v, way);
#endif
		sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
	}

	back_to_cached();
	local_irq_restore(flags);
}

/*
 * Invalidate the D-caches, but don't write back their contents.
 */
static void sh2a__flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	local_irq_save(flags);
	jump_to_uncached();

	/* If there are too many pages then just blow the cache */
	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
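		/* Invalidate the entire operand cache via the cache control register */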
		__raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
			     SH_CCR);
	} else {
		for (v = begin; v < end; v += L1_CACHE_BYTES)
			sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
	}

	back_to_cached();
	local_irq_restore(flags);
}

/*
 * Write back the D-cache over the given range and invalidate the
 * I-cache.
 */
static void sh2a_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;
	unsigned long v;
	unsigned long flags;

	start = data->addr1 & ~(L1_CACHE_BYTES-1);
	end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);

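	/*
	 * Write the range back from the D-cache first so that the
	 * I-cache picks up the newly written instructions (only needed
	 * when the D-cache operates in write-back mode).
	 */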
#ifdef CONFIG_CACHE_WRITEBACK
	sh2a__flush_wback_region((void *)start, end-start);
#endif

	local_irq_save(flags);
	jump_to_uncached();

	/* I-Cache invalidate */
	/* If there are too many pages then just blow the cache */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		__raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
			     SH_CCR);
	} else {
		for (v = start; v < end; v += L1_CACHE_BYTES)
			sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
	}

	back_to_cached();
	local_irq_restore(flags);
}

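/*
 * Install the SH-2A implementations of the generic SH cache
 * maintenance hooks.
 */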
void __init sh2a_cache_init(void)
{
	local_flush_icache_range	= sh2a_flush_icache_range;

	__flush_wback_region		= sh2a__flush_wback_region;
	__flush_purge_region		= sh2a__flush_purge_region;
	__flush_invalidate_region	= sh2a__flush_invalidate_region;
}