/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask;	/* Bitmask of active ways */

static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* wait for the operation to complete */
	while (readl(reg) & mask)
		;
}

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;
	writel(0, base + L2X0_CACHE_SYNC);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel(addr, base + L2X0_INV_LINE_PA);
}

#ifdef CONFIG_PL310_ERRATA_588369
static void debug_writel(unsigned long val)
{
	extern void omap_smc1(u32 fn, u32 arg);

	/*
	 * Texas Instruments secure monitor API to modify the
	 * PL310 Debug Control Register.
	 */
	omap_smc1(0x100, val);
}
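
/*
 * PL310 erratum 588369: in affected early revisions, a Clean & Invalidate
 * by PA may leave a clean line valid in the cache. As a workaround, the
 * flush below is split into a Clean by PA followed by an Invalidate by PA,
 * and callers bracket it with debug_writel(0x03)/debug_writel(0x00) so
 * that write-back and cache linefill are disabled while the two-step
 * sequence runs.
 */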

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel(addr, base + L2X0_INV_LINE_PA);
}
#else

/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static inline void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
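
/*
 * Note on the range operations above: each walks the range in blocks of
 * at most 4096 bytes, dropping and immediately retaking l2x0_lock between
 * blocks. This bounds how long interrupts stay disabled on a CPU when a
 * large range is maintained, at the cost of letting other cache operations
 * interleave at block boundaries.
 */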

void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
	__u32 aux;
	__u32 cache_id;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl(l2x0_base + L2X0_CACHE_ID);
	aux = readl(l2x0_base + L2X0_AUX_CTRL);

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * Check if the l2x0 controller is already enabled.
	 * If we are booting from non-secure mode, accessing
	 * the registers below will fault.
	 */
	if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {

		/* l2x0 controller is disabled */
		aux &= aux_mask;
		aux |= aux_val;
		writel(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel(1, l2x0_base + L2X0_CTRL);
	}

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
	       ways, cache_id, aux);
}
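
/*
 * Usage sketch (hypothetical, for illustration only): a platform would
 * normally map the controller and call l2x0_init() once during machine
 * init, e.g.
 *
 *	void __iomem *l2cc = ioremap(PLAT_L2CC_PHYS_BASE, SZ_4K);
 *	if (l2cc)
 *		l2x0_init(l2cc, 0x00000000, 0xffffffff);
 *
 * PLAT_L2CC_PHYS_BASE stands in for a platform-specific physical address;
 * an (aux_val, aux_mask) pair of (0x00000000, 0xffffffff) leaves the
 * Auxiliary Control Register exactly as the boot firmware configured it.
 */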