1 /*
2  * arch/arm/mm/cache-l2x0.c - L210/L220/L310 cache controller support
3  *
4  * Copyright (C) 2007 ARM Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18  */
19 #include <linux/cpu.h>
20 #include <linux/err.h>
21 #include <linux/init.h>
22 #include <linux/smp.h>
23 #include <linux/spinlock.h>
24 #include <linux/log2.h>
25 #include <linux/io.h>
26 #include <linux/of.h>
27 #include <linux/of_address.h>
28 
29 #include <asm/cacheflush.h>
30 #include <asm/cp15.h>
31 #include <asm/cputype.h>
32 #include <asm/hardware/cache-l2x0.h>
33 #include "cache-tauros3.h"
34 #include "cache-aurora-l2.h"
35 
36 struct l2c_init_data {
37 	const char *type;
38 	unsigned way_size_0;
39 	unsigned num_lock;
40 	void (*of_parse)(const struct device_node *, u32 *, u32 *);
41 	void (*enable)(void __iomem *, unsigned);
42 	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
43 	void (*save)(void __iomem *);
44 	void (*configure)(void __iomem *);
45 	void (*unlock)(void __iomem *, unsigned);
46 	struct outer_cache_fns outer_cache;
47 };
48 
49 #define CACHE_LINE_SIZE		32
50 
51 static void __iomem *l2x0_base;
52 static const struct l2c_init_data *l2x0_data;
53 static DEFINE_RAW_SPINLOCK(l2x0_lock);
54 static u32 l2x0_way_mask;	/* Bitmask of active ways */
55 static u32 l2x0_size;
56 static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
57 
58 struct l2x0_regs l2x0_saved_regs;
59 
60 static bool l2x0_bresp_disable;
61 static bool l2x0_flz_disable;
62 
63 /*
64  * Common code for all cache controllers.
65  */
66 static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
67 {
68 	/* wait for cache operation by line or way to complete */
69 	while (readl_relaxed(reg) & mask)
70 		cpu_relax();
71 }
72 
73 /*
74  * By default, we write directly to secure registers.  Platforms must
75  * override this if they are running non-secure.
76  */
77 static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
78 {
79 	if (val == readl_relaxed(base + reg))
80 		return;
81 	if (outer_cache.write_sec)
82 		outer_cache.write_sec(val, reg);
83 	else
84 		writel_relaxed(val, base + reg);
85 }
86 
87 /*
88  * This should only be called when we have a requirement that the
89  * register be written due to a work-around, as platforms running
90  * in non-secure mode may not be able to access this register.
91  */
92 static inline void l2c_set_debug(void __iomem *base, unsigned long val)
93 {
94 	l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
95 }
96 
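/*
 * Start a background operation on all active ways and spin until the
 * controller reports that it has completed.
 */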
97 static void __l2c_op_way(void __iomem *reg)
98 {
99 	writel_relaxed(l2x0_way_mask, reg);
100 	l2c_wait_mask(reg, l2x0_way_mask);
101 }
102 
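/*
 * Clear the data and instruction lockdown registers for the first "num"
 * ways so that no ways are locked down when the cache is enabled.
 */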
103 static inline void l2c_unlock(void __iomem *base, unsigned num)
104 {
105 	unsigned i;
106 
107 	for (i = 0; i < num; i++) {
108 		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
109 			       i * L2X0_LOCKDOWN_STRIDE);
110 		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
111 			       i * L2X0_LOCKDOWN_STRIDE);
112 	}
113 }
114 
115 static void l2c_configure(void __iomem *base)
116 {
117 	l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
118 }
119 
120 /*
121  * Enable the L2 cache controller.  This function must only be
122  * called when the cache controller is known to be disabled.
123  */
124 static void l2c_enable(void __iomem *base, unsigned num_lock)
125 {
126 	unsigned long flags;
127 
128 	if (outer_cache.configure)
129 		outer_cache.configure(&l2x0_saved_regs);
130 	else
131 		l2x0_data->configure(base);
132 
133 	l2x0_data->unlock(base, num_lock);
134 
135 	local_irq_save(flags);
136 	__l2c_op_way(base + L2X0_INV_WAY);
137 	writel_relaxed(0, base + sync_reg_offset);
138 	l2c_wait_mask(base + sync_reg_offset, 1);
139 	local_irq_restore(flags);
140 
141 	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
142 }
143 
144 static void l2c_disable(void)
145 {
146 	void __iomem *base = l2x0_base;
147 
148 	l2x0_pmu_suspend();
149 
150 	outer_cache.flush_all();
151 	l2c_write_sec(0, base, L2X0_CTRL);
152 	dsb(st);
153 }
154 
155 static void l2c_save(void __iomem *base)
156 {
157 	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
158 }
159 
160 static void l2c_resume(void)
161 {
162 	void __iomem *base = l2x0_base;
163 
164 	/* Do not touch the controller if already enabled. */
165 	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
166 		l2c_enable(base, l2x0_data->num_lock);
167 
168 	l2x0_pmu_resume();
169 }
170 
171 /*
172  * L2C-210 specific code.
173  *
174  * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
175  * ensure that no background operation is running.  The way operations
176  * are all background tasks.
177  *
178  * While a background operation is in progress, any new operation is
179  * ignored (it is unspecified whether this causes an error).  Thankfully,
180  * the L2C-210 is not used on SMP systems.
181  *
182  * The sync register is only ever L2X0_CACHE_SYNC, but
183  * we use sync_reg_offset here so we can share some of this with L2C-310.
184  */
185 static void __l2c210_cache_sync(void __iomem *base)
186 {
187 	writel_relaxed(0, base + sync_reg_offset);
188 }
189 
190 static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
191 	unsigned long end)
192 {
193 	while (start < end) {
194 		writel_relaxed(start, reg);
195 		start += CACHE_LINE_SIZE;
196 	}
197 }
198 
199 static void l2c210_inv_range(unsigned long start, unsigned long end)
200 {
201 	void __iomem *base = l2x0_base;
202 
203 	if (start & (CACHE_LINE_SIZE - 1)) {
204 		start &= ~(CACHE_LINE_SIZE - 1);
205 		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
206 		start += CACHE_LINE_SIZE;
207 	}
208 
209 	if (end & (CACHE_LINE_SIZE - 1)) {
210 		end &= ~(CACHE_LINE_SIZE - 1);
211 		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
212 	}
213 
214 	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
215 	__l2c210_cache_sync(base);
216 }
217 
218 static void l2c210_clean_range(unsigned long start, unsigned long end)
219 {
220 	void __iomem *base = l2x0_base;
221 
222 	start &= ~(CACHE_LINE_SIZE - 1);
223 	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
224 	__l2c210_cache_sync(base);
225 }
226 
227 static void l2c210_flush_range(unsigned long start, unsigned long end)
228 {
229 	void __iomem *base = l2x0_base;
230 
231 	start &= ~(CACHE_LINE_SIZE - 1);
232 	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
233 	__l2c210_cache_sync(base);
234 }
235 
236 static void l2c210_flush_all(void)
237 {
238 	void __iomem *base = l2x0_base;
239 
240 	BUG_ON(!irqs_disabled());
241 
242 	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
243 	__l2c210_cache_sync(base);
244 }
245 
246 static void l2c210_sync(void)
247 {
248 	__l2c210_cache_sync(l2x0_base);
249 }
250 
251 static const struct l2c_init_data l2c210_data __initconst = {
252 	.type = "L2C-210",
253 	.way_size_0 = SZ_8K,
254 	.num_lock = 1,
255 	.enable = l2c_enable,
256 	.save = l2c_save,
257 	.configure = l2c_configure,
258 	.unlock = l2c_unlock,
259 	.outer_cache = {
260 		.inv_range = l2c210_inv_range,
261 		.clean_range = l2c210_clean_range,
262 		.flush_range = l2c210_flush_range,
263 		.flush_all = l2c210_flush_all,
264 		.disable = l2c_disable,
265 		.sync = l2c210_sync,
266 		.resume = l2c_resume,
267 	},
268 };
269 
270 /*
271  * L2C-220 specific code.
272  *
273  * All operations are background operations: they have to be waited for.
274  * Conflicting requests generate a slave error (which will cause an
275  * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
276  * sync register here.
277  *
278  * However, we can re-use the l2c_resume call.
279  */
280 static inline void __l2c220_cache_sync(void __iomem *base)
281 {
282 	writel_relaxed(0, base + L2X0_CACHE_SYNC);
283 	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
284 }
285 
286 static void l2c220_op_way(void __iomem *base, unsigned reg)
287 {
288 	unsigned long flags;
289 
290 	raw_spin_lock_irqsave(&l2x0_lock, flags);
291 	__l2c_op_way(base + reg);
292 	__l2c220_cache_sync(base);
293 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
294 }
295 
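/*
 * Issue a PA-based line operation for each cache line in [start, end),
 * waiting for the previous operation to complete before each write.  The
 * range is processed in 4K chunks and the l2x0_lock (held by the caller,
 * IRQ state in "flags") is briefly dropped between chunks to bound the
 * interrupt-disabled time.  Returns the possibly updated flags.
 */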
296 static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
297 	unsigned long end, unsigned long flags)
298 {
299 	raw_spinlock_t *lock = &l2x0_lock;
300 
301 	while (start < end) {
302 		unsigned long blk_end = start + min(end - start, 4096UL);
303 
304 		while (start < blk_end) {
305 			l2c_wait_mask(reg, 1);
306 			writel_relaxed(start, reg);
307 			start += CACHE_LINE_SIZE;
308 		}
309 
310 		if (blk_end < end) {
311 			raw_spin_unlock_irqrestore(lock, flags);
312 			raw_spin_lock_irqsave(lock, flags);
313 		}
314 	}
315 
316 	return flags;
317 }
318 
319 static void l2c220_inv_range(unsigned long start, unsigned long end)
320 {
321 	void __iomem *base = l2x0_base;
322 	unsigned long flags;
323 
324 	raw_spin_lock_irqsave(&l2x0_lock, flags);
325 	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
326 		if (start & (CACHE_LINE_SIZE - 1)) {
327 			start &= ~(CACHE_LINE_SIZE - 1);
328 			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
329 			start += CACHE_LINE_SIZE;
330 		}
331 
332 		if (end & (CACHE_LINE_SIZE - 1)) {
333 			end &= ~(CACHE_LINE_SIZE - 1);
334 			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
335 			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
336 		}
337 	}
338 
339 	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
340 				   start, end, flags);
341 	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
342 	__l2c220_cache_sync(base);
343 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
344 }
345 
346 static void l2c220_clean_range(unsigned long start, unsigned long end)
347 {
348 	void __iomem *base = l2x0_base;
349 	unsigned long flags;
350 
351 	start &= ~(CACHE_LINE_SIZE - 1);
352 	if ((end - start) >= l2x0_size) {
353 		l2c220_op_way(base, L2X0_CLEAN_WAY);
354 		return;
355 	}
356 
357 	raw_spin_lock_irqsave(&l2x0_lock, flags);
358 	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
359 				   start, end, flags);
360 	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
361 	__l2c220_cache_sync(base);
362 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
363 }
364 
365 static void l2c220_flush_range(unsigned long start, unsigned long end)
366 {
367 	void __iomem *base = l2x0_base;
368 	unsigned long flags;
369 
370 	start &= ~(CACHE_LINE_SIZE - 1);
371 	if ((end - start) >= l2x0_size) {
372 		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
373 		return;
374 	}
375 
376 	raw_spin_lock_irqsave(&l2x0_lock, flags);
377 	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
378 				   start, end, flags);
379 	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
380 	__l2c220_cache_sync(base);
381 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
382 }
383 
384 static void l2c220_flush_all(void)
385 {
386 	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
387 }
388 
389 static void l2c220_sync(void)
390 {
391 	unsigned long flags;
392 
393 	raw_spin_lock_irqsave(&l2x0_lock, flags);
394 	__l2c220_cache_sync(l2x0_base);
395 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
396 }
397 
398 static void l2c220_enable(void __iomem *base, unsigned num_lock)
399 {
400 	/*
401 	 * Always enable non-secure access to the lockdown registers -
402 	 * we write to them as part of the L2C enable sequence so they
403 	 * need to be accessible.
404 	 */
405 	l2x0_saved_regs.aux_ctrl |= L220_AUX_CTRL_NS_LOCKDOWN;
406 
407 	l2c_enable(base, num_lock);
408 }
409 
410 static void l2c220_unlock(void __iomem *base, unsigned num_lock)
411 {
412 	if (readl_relaxed(base + L2X0_AUX_CTRL) & L220_AUX_CTRL_NS_LOCKDOWN)
413 		l2c_unlock(base, num_lock);
414 }
415 
416 static const struct l2c_init_data l2c220_data = {
417 	.type = "L2C-220",
418 	.way_size_0 = SZ_8K,
419 	.num_lock = 1,
420 	.enable = l2c220_enable,
421 	.save = l2c_save,
422 	.configure = l2c_configure,
423 	.unlock = l2c220_unlock,
424 	.outer_cache = {
425 		.inv_range = l2c220_inv_range,
426 		.clean_range = l2c220_clean_range,
427 		.flush_range = l2c220_flush_range,
428 		.flush_all = l2c220_flush_all,
429 		.disable = l2c_disable,
430 		.sync = l2c220_sync,
431 		.resume = l2c_resume,
432 	},
433 };
434 
435 /*
436  * L2C-310 specific code.
437  *
438  * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
439  * and the way operations are all background tasks.  However, issuing an
440  * operation while a background operation is in progress results in a
441  * SLVERR response.  We can reuse:
442  *
443  *  __l2c210_cache_sync (using sync_reg_offset)
444  *  l2c210_sync
445  *  l2c210_inv_range (if 588369 is not applicable)
446  *  l2c210_clean_range
447  *  l2c210_flush_range (if 588369 is not applicable)
448  *  l2c210_flush_all (if 727915 is not applicable)
449  *
450  * Errata:
451  * 588369: PL310 R0P0->R1P0, fixed R2P0.
452  *	Affects: all clean+invalidate operations
453  *	clean and invalidate skips the invalidate step, so we need to issue
454  *	separate operations.  We also require the above debug workaround
455  *	enclosing this code fragment on affected parts.  On unaffected parts,
456  *	we must not use this workaround without the debug register writes
457  *	to avoid exposing a problem similar to 727915.
458  *
459  * 727915: PL310 R2P0->R3P0, fixed R3P1.
460  *	Affects: clean+invalidate by way
461  *	clean and invalidate by way runs in the background, and a store can
462  *	hit the line between the clean operation and invalidate operation,
463  *	resulting in the store being lost.
464  *
465  * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
466  *	Affects: 8x64-bit (double fill) line fetches
467  *	double fill line fetches can fail to cause dirty data to be evicted
468  *	from the cache before the new data overwrites the second line.
469  *
470  * 753970: PL310 R3P0, fixed R3P1.
471  *	Affects: sync
472  *	prevents merging writes after the sync operation, until another L2C
473  *	operation is performed (or a number of other conditions.)
474  *
475  * 769419: PL310 R0P0->R3P1, fixed R3P2.
476  *	Affects: store buffer
477  *	store buffer is not automatically drained.
478  */
479 static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
480 {
481 	void __iomem *base = l2x0_base;
482 
483 	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
484 		unsigned long flags;
485 
486 		/* Erratum 588369 for both clean+invalidate operations */
487 		raw_spin_lock_irqsave(&l2x0_lock, flags);
488 		l2c_set_debug(base, 0x03);
489 
490 		if (start & (CACHE_LINE_SIZE - 1)) {
491 			start &= ~(CACHE_LINE_SIZE - 1);
492 			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
493 			writel_relaxed(start, base + L2X0_INV_LINE_PA);
494 			start += CACHE_LINE_SIZE;
495 		}
496 
497 		if (end & (CACHE_LINE_SIZE - 1)) {
498 			end &= ~(CACHE_LINE_SIZE - 1);
499 			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
500 			writel_relaxed(end, base + L2X0_INV_LINE_PA);
501 		}
502 
503 		l2c_set_debug(base, 0x00);
504 		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
505 	}
506 
507 	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
508 	__l2c210_cache_sync(base);
509 }
510 
511 static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
512 {
513 	raw_spinlock_t *lock = &l2x0_lock;
514 	unsigned long flags;
515 	void __iomem *base = l2x0_base;
516 
517 	raw_spin_lock_irqsave(lock, flags);
518 	while (start < end) {
519 		unsigned long blk_end = start + min(end - start, 4096UL);
520 
521 		l2c_set_debug(base, 0x03);
522 		while (start < blk_end) {
523 			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
524 			writel_relaxed(start, base + L2X0_INV_LINE_PA);
525 			start += CACHE_LINE_SIZE;
526 		}
527 		l2c_set_debug(base, 0x00);
528 
529 		if (blk_end < end) {
530 			raw_spin_unlock_irqrestore(lock, flags);
531 			raw_spin_lock_irqsave(lock, flags);
532 		}
533 	}
534 	raw_spin_unlock_irqrestore(lock, flags);
535 	__l2c210_cache_sync(base);
536 }
537 
538 static void l2c310_flush_all_erratum(void)
539 {
540 	void __iomem *base = l2x0_base;
541 	unsigned long flags;
542 
543 	raw_spin_lock_irqsave(&l2x0_lock, flags);
544 	l2c_set_debug(base, 0x03);
545 	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
546 	l2c_set_debug(base, 0x00);
547 	__l2c210_cache_sync(base);
548 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
549 }
550 
551 static void __init l2c310_save(void __iomem *base)
552 {
553 	unsigned revision;
554 
555 	l2c_save(base);
556 
557 	l2x0_saved_regs.tag_latency = readl_relaxed(base +
558 		L310_TAG_LATENCY_CTRL);
559 	l2x0_saved_regs.data_latency = readl_relaxed(base +
560 		L310_DATA_LATENCY_CTRL);
561 	l2x0_saved_regs.filter_end = readl_relaxed(base +
562 		L310_ADDR_FILTER_END);
563 	l2x0_saved_regs.filter_start = readl_relaxed(base +
564 		L310_ADDR_FILTER_START);
565 
566 	revision = readl_relaxed(base + L2X0_CACHE_ID) &
567 			L2X0_CACHE_ID_RTL_MASK;
568 
569 	/* From r2p0, there is a Prefetch offset/control register */
570 	if (revision >= L310_CACHE_ID_RTL_R2P0)
571 		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
572 							L310_PREFETCH_CTRL);
573 
574 	/* From r3p0, there is a Power control register */
575 	if (revision >= L310_CACHE_ID_RTL_R3P0)
576 		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
577 							L310_POWER_CTRL);
578 }
579 
580 static void l2c310_configure(void __iomem *base)
581 {
582 	unsigned revision;
583 
584 	l2c_configure(base);
585 
586 	/* restore pl310 setup */
587 	l2c_write_sec(l2x0_saved_regs.tag_latency, base,
588 		      L310_TAG_LATENCY_CTRL);
589 	l2c_write_sec(l2x0_saved_regs.data_latency, base,
590 		      L310_DATA_LATENCY_CTRL);
591 	l2c_write_sec(l2x0_saved_regs.filter_end, base,
592 		      L310_ADDR_FILTER_END);
593 	l2c_write_sec(l2x0_saved_regs.filter_start, base,
594 		      L310_ADDR_FILTER_START);
595 
596 	revision = readl_relaxed(base + L2X0_CACHE_ID) &
597 				 L2X0_CACHE_ID_RTL_MASK;
598 
599 	if (revision >= L310_CACHE_ID_RTL_R2P0)
600 		l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
601 			      L310_PREFETCH_CTRL);
602 	if (revision >= L310_CACHE_ID_RTL_R3P0)
603 		l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
604 			      L310_POWER_CTRL);
605 }
606 
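/*
 * CPU hotplug callbacks, registered from l2c310_enable() when full line
 * of zeros is in use: set or clear the Cortex-A9 ACTLR bits that pair
 * with the L2C-310 (bit 3 is full line of zeros; bits 1 and 2 are the
 * A9 prefetch hint/enable bits) as each CPU comes up or goes down.
 */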
607 static int l2c310_starting_cpu(unsigned int cpu)
608 {
609 	set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
610 	return 0;
611 }
612 
613 static int l2c310_dying_cpu(unsigned int cpu)
614 {
615 	set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
616 	return 0;
617 }
618 
619 static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
620 {
621 	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
622 	bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
623 	u32 aux = l2x0_saved_regs.aux_ctrl;
624 
625 	if (rev >= L310_CACHE_ID_RTL_R2P0) {
626 		if (cortex_a9 && !l2x0_bresp_disable) {
627 			aux |= L310_AUX_CTRL_EARLY_BRESP;
628 			pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
629 		} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
630 			pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
631 			aux &= ~L310_AUX_CTRL_EARLY_BRESP;
632 		}
633 	}
634 
635 	if (cortex_a9 && !l2x0_flz_disable) {
636 		u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
637 		u32 acr = get_auxcr();
638 
639 		pr_debug("Cortex-A9 ACR=0x%08x\n", acr);
640 
641 		if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
642 			pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");
643 
644 		if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
645 			pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");
646 
647 		if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
648 			aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
649 			pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
650 		}
651 	} else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
652 		pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
653 		aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
654 	}
655 
656 	/*
657 	 * Always enable non-secure access to the lockdown registers -
658 	 * we write to them as part of the L2C enable sequence so they
659 	 * need to be accessible.
660 	 */
661 	l2x0_saved_regs.aux_ctrl = aux | L310_AUX_CTRL_NS_LOCKDOWN;
662 
663 	l2c_enable(base, num_lock);
664 
665 	/* Read back resulting AUX_CTRL value as it could have been altered. */
666 	aux = readl_relaxed(base + L2X0_AUX_CTRL);
667 
668 	if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
669 		u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);
670 
671 		pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
672 			aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
673 			aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
674 			1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
675 	}
676 
677 	/* r3p0 or later has power control register */
678 	if (rev >= L310_CACHE_ID_RTL_R3P0) {
679 		u32 power_ctrl;
680 
681 		power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
682 		pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
683 			power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
684 			power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
685 	}
686 
687 	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO)
688 		cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
689 				  "arm/l2x0:starting", l2c310_starting_cpu,
690 				  l2c310_dying_cpu);
691 }
692 
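/*
 * Apply the erratum workarounds described above according to the RTL
 * revision from the cache ID, patching the outer_cache callbacks, the
 * saved prefetch control value and the sync register offset as needed.
 */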
693 static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
694 	struct outer_cache_fns *fns)
695 {
696 	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
697 	const char *errata[8];
698 	unsigned n = 0;
699 
700 	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
701 	    revision < L310_CACHE_ID_RTL_R2P0 &&
702 	    /* For bcm compatibility */
703 	    fns->inv_range == l2c210_inv_range) {
704 		fns->inv_range = l2c310_inv_range_erratum;
705 		fns->flush_range = l2c310_flush_range_erratum;
706 		errata[n++] = "588369";
707 	}
708 
709 	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
710 	    revision >= L310_CACHE_ID_RTL_R2P0 &&
711 	    revision < L310_CACHE_ID_RTL_R3P1) {
712 		fns->flush_all = l2c310_flush_all_erratum;
713 		errata[n++] = "727915";
714 	}
715 
716 	if (revision >= L310_CACHE_ID_RTL_R3P0 &&
717 	    revision < L310_CACHE_ID_RTL_R3P2) {
718 		u32 val = l2x0_saved_regs.prefetch_ctrl;
719 		if (val & L310_PREFETCH_CTRL_DBL_LINEFILL) {
720 			val &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
721 			l2x0_saved_regs.prefetch_ctrl = val;
722 			errata[n++] = "752271";
723 		}
724 	}
725 
726 	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
727 	    revision == L310_CACHE_ID_RTL_R3P0) {
728 		sync_reg_offset = L2X0_DUMMY_REG;
729 		errata[n++] = "753970";
730 	}
731 
732 	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
733 		errata[n++] = "769419";
734 
735 	if (n) {
736 		unsigned i;
737 
738 		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
739 		for (i = 0; i < n; i++)
740 			pr_cont(" %s", errata[i]);
741 		pr_cont(" enabled\n");
742 	}
743 }
744 
745 static void l2c310_disable(void)
746 {
747 	/*
748 	 * If full-line-of-zeros is enabled, we must first disable it in the
749 	 * Cortex-A9 auxiliary control register before disabling the L2 cache.
750 	 */
751 	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
752 		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
753 
754 	l2c_disable();
755 }
756 
757 static void l2c310_resume(void)
758 {
759 	l2c_resume();
760 
761 	/* Re-enable full-line-of-zeros for Cortex-A9 */
762 	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
763 		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
764 }
765 
766 static void l2c310_unlock(void __iomem *base, unsigned num_lock)
767 {
768 	if (readl_relaxed(base + L2X0_AUX_CTRL) & L310_AUX_CTRL_NS_LOCKDOWN)
769 		l2c_unlock(base, num_lock);
770 }
771 
772 static const struct l2c_init_data l2c310_init_fns __initconst = {
773 	.type = "L2C-310",
774 	.way_size_0 = SZ_8K,
775 	.num_lock = 8,
776 	.enable = l2c310_enable,
777 	.fixup = l2c310_fixup,
778 	.save = l2c310_save,
779 	.configure = l2c310_configure,
780 	.unlock = l2c310_unlock,
781 	.outer_cache = {
782 		.inv_range = l2c210_inv_range,
783 		.clean_range = l2c210_clean_range,
784 		.flush_range = l2c210_flush_range,
785 		.flush_all = l2c210_flush_all,
786 		.disable = l2c310_disable,
787 		.sync = l2c210_sync,
788 		.resume = l2c310_resume,
789 	},
790 };
791 
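/*
 * Common initialisation: sanity check the platform/DT aux values, derive
 * the number of ways and the cache size from the cache ID and auxiliary
 * control register, apply revision-specific fixups, enable the controller
 * if it is not already enabled, and install the outer_cache callbacks.
 */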
792 static int __init __l2c_init(const struct l2c_init_data *data,
793 			     u32 aux_val, u32 aux_mask, u32 cache_id, bool nosync)
794 {
795 	struct outer_cache_fns fns;
796 	unsigned way_size_bits, ways;
797 	u32 aux, old_aux;
798 
799 	/*
800 	 * Save the pointer globally so that callbacks which do not receive
801 	 * context from callers can access the structure.
802 	 */
803 	l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
804 	if (!l2x0_data)
805 		return -ENOMEM;
806 
807 	/*
808 	 * Sanity check the aux values.  aux_mask is the bits we preserve
809 	 * from reading the hardware register, and aux_val is the bits we
810 	 * set.
811 	 */
812 	if (aux_val & aux_mask)
813 		pr_alert("L2C: platform provided aux values permit register corruption.\n");
814 
815 	old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
816 	aux &= aux_mask;
817 	aux |= aux_val;
818 
819 	if (old_aux != aux)
820 		pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
821 		        old_aux, aux);
822 
823 	/* Determine the number of ways */
824 	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
825 	case L2X0_CACHE_ID_PART_L310:
826 		if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
827 			pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
828 		if (aux & (1 << 16))
829 			ways = 16;
830 		else
831 			ways = 8;
832 		break;
833 
834 	case L2X0_CACHE_ID_PART_L210:
835 	case L2X0_CACHE_ID_PART_L220:
836 		ways = (aux >> 13) & 0xf;
837 		break;
838 
839 	case AURORA_CACHE_ID:
840 		ways = (aux >> 13) & 0xf;
841 		ways = 2 << ((ways + 1) >> 2);
842 		break;
843 
844 	default:
845 		/* Assume unknown chips have 8 ways */
846 		ways = 8;
847 		break;
848 	}
849 
850 	l2x0_way_mask = (1 << ways) - 1;
851 
852 	/*
853 	 * way_size_0 is the size that a way_size value of zero would be
854 	 * given the calculation: way_size = way_size_0 << way_size_bits.
855 	 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
856 	 * then way_size_0 would be 8k.
857 	 *
858 	 * L2 cache size = number of ways * way size.
859 	 */
860 	way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
861 			L2C_AUX_CTRL_WAY_SIZE_SHIFT;
862 	l2x0_size = ways * (data->way_size_0 << way_size_bits);
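	/*
	 * Worked example (illustrative values only): an L2C-310 with
	 * way_size_0 = SZ_8K and a way size field of 3 has a way size of
	 * 8K << 3 = 64K, so with 16 ways l2x0_size = 16 * 64K = 1MB.
	 */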
863 
864 	fns = data->outer_cache;
865 	fns.write_sec = outer_cache.write_sec;
866 	fns.configure = outer_cache.configure;
867 	if (data->fixup)
868 		data->fixup(l2x0_base, cache_id, &fns);
869 	if (nosync) {
870 		pr_info("L2C: disabling outer sync\n");
871 		fns.sync = NULL;
872 	}
873 
874 	/*
875 	 * Check if the l2x0 controller is already enabled.  If we are booting
876 	 * in non-secure mode, accessing the registers below will fault.
877 	 */
878 	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
879 		l2x0_saved_regs.aux_ctrl = aux;
880 
881 		data->enable(l2x0_base, data->num_lock);
882 	}
883 
884 	outer_cache = fns;
885 
886 	/*
887 	 * It is strange to save the register state before initialisation,
888 	 * but hey, this is what the DT implementations decided to do.
889 	 */
890 	if (data->save)
891 		data->save(l2x0_base);
892 
893 	/* Re-read it in case some bits are reserved. */
894 	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
895 
896 	pr_info("%s cache controller enabled, %d ways, %d kB\n",
897 		data->type, ways, l2x0_size >> 10);
898 	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
899 		data->type, cache_id, aux);
900 
901 	l2x0_pmu_register(l2x0_base, cache_id);
902 
903 	return 0;
904 }
905 
906 void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
907 {
908 	const struct l2c_init_data *data;
909 	u32 cache_id;
910 
911 	l2x0_base = base;
912 
913 	cache_id = readl_relaxed(base + L2X0_CACHE_ID);
914 
915 	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
916 	default:
917 	case L2X0_CACHE_ID_PART_L210:
918 		data = &l2c210_data;
919 		break;
920 
921 	case L2X0_CACHE_ID_PART_L220:
922 		data = &l2c220_data;
923 		break;
924 
925 	case L2X0_CACHE_ID_PART_L310:
926 		data = &l2c310_init_fns;
927 		break;
928 	}
929 
930 	/* Read back current (default) hardware configuration */
931 	if (data->save)
932 		data->save(l2x0_base);
933 
934 	__l2c_init(data, aux_val, aux_mask, cache_id, false);
935 }
936 
937 #ifdef CONFIG_OF
938 static int l2_wt_override;
939 
940 /* Aurora doesn't have the cache ID register available, so we have to
941  * pass it through the device tree */
942 static u32 cache_id_part_number_from_dt;
943 
944 /**
945  * l2x0_cache_size_of_parse() - read cache size parameters from DT
946  * @np: the device tree node for the l2 cache
947  * @aux_val: pointer to machine-supplied auxiliary register value, to
948  * be augmented by the call (bits to be set to 1)
949  * @aux_mask: pointer to machine-supplied auxiliary register mask, to
950  * be augmented by the call (bits to be set to 0)
951  * @associativity: variable to return the calculated associativity in
952  * @max_way_size: the maximum size in bytes for the cache ways
953  */
954 static int __init l2x0_cache_size_of_parse(const struct device_node *np,
955 					    u32 *aux_val, u32 *aux_mask,
956 					    u32 *associativity,
957 					    u32 max_way_size)
958 {
959 	u32 mask = 0, val = 0;
960 	u32 cache_size = 0, sets = 0;
961 	u32 way_size_bits = 1;
962 	u32 way_size = 0;
963 	u32 block_size = 0;
964 	u32 line_size = 0;
965 
966 	of_property_read_u32(np, "cache-size", &cache_size);
967 	of_property_read_u32(np, "cache-sets", &sets);
968 	of_property_read_u32(np, "cache-block-size", &block_size);
969 	of_property_read_u32(np, "cache-line-size", &line_size);
970 
971 	if (!cache_size || !sets)
972 		return -ENODEV;
973 
974 	/* All these L2 caches actually have a line size equal to the block size */
975 	if (!line_size) {
976 		if (block_size) {
977 			/* If linesize is not given, it is equal to blocksize */
978 			line_size = block_size;
979 		} else {
980 			/* Fall back to known size */
981 			pr_warn("L2C OF: no cache block/line size given: "
982 				"falling back to default size %d bytes\n",
983 				CACHE_LINE_SIZE);
984 			line_size = CACHE_LINE_SIZE;
985 		}
986 	}
987 
988 	if (line_size != CACHE_LINE_SIZE)
989 		pr_warn("L2C OF: DT supplied line size %d bytes does "
990 			"not match hardware line size of %d bytes\n",
991 			line_size,
992 			CACHE_LINE_SIZE);
993 
994 	/*
995 	 * Since:
996 	 * set size = cache size / sets
997 	 * ways = cache size / (sets * line size)
998 	 * way size = cache size / (cache size / (sets * line size))
999 	 * way size = sets * line size
1000 	 * associativity = ways = cache size / way size
1001 	 */
1002 	way_size = sets * line_size;
1003 	*associativity = cache_size / way_size;
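	/*
	 * Worked example (illustrative DT values only): cache-size = 1MB,
	 * cache-sets = 2048 and a 32 byte line give way_size = 2048 * 32 =
	 * 64KB and associativity = 1MB / 64KB = 16; the way size bits
	 * computed below are then ilog2(64) - 3 = 3.
	 */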
1004 
1005 	if (way_size > max_way_size) {
1006 		pr_err("L2C OF: way size %d bytes is too large\n", way_size);
1007 		return -EINVAL;
1008 	}
1009 
1010 	pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
1011 		cache_size, cache_size >> 10);
1012 	pr_info("L2C OF: override line size: %d bytes\n", line_size);
1013 	pr_info("L2C OF: override way size: %d bytes (%dKB)\n",
1014 		way_size, way_size >> 10);
1015 	pr_info("L2C OF: override associativity: %d\n", *associativity);
1016 
1017 	/*
1018 	 * Calculates the bits 17:19 to set for way size:
1019 	 * 512KB -> 6, 256KB -> 5, ... 16KB -> 1
1020 	 */
1021 	way_size_bits = ilog2(way_size >> 10) - 3;
1022 	if (way_size_bits < 1 || way_size_bits > 6) {
1023 		pr_err("L2C OF: cache way size illegal: %d bytes is not mapped\n",
1024 		       way_size);
1025 		return -EINVAL;
1026 	}
1027 
1028 	mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
1029 	val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT);
1030 
1031 	*aux_val &= ~mask;
1032 	*aux_val |= val;
1033 	*aux_mask &= ~mask;
1034 
1035 	return 0;
1036 }
1037 
1038 static void __init l2x0_of_parse(const struct device_node *np,
1039 				 u32 *aux_val, u32 *aux_mask)
1040 {
1041 	u32 data[2] = { 0, 0 };
1042 	u32 tag = 0;
1043 	u32 dirty = 0;
1044 	u32 val = 0, mask = 0;
1045 	u32 assoc;
1046 	int ret;
1047 
1048 	of_property_read_u32(np, "arm,tag-latency", &tag);
1049 	if (tag) {
1050 		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
1051 		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
1052 	}
1053 
1054 	of_property_read_u32_array(np, "arm,data-latency",
1055 				   data, ARRAY_SIZE(data));
1056 	if (data[0] && data[1]) {
1057 		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
1058 			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
1059 		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
1060 		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
1061 	}
1062 
1063 	of_property_read_u32(np, "arm,dirty-latency", &dirty);
1064 	if (dirty) {
1065 		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
1066 		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
1067 	}
1068 
1069 	if (of_property_read_bool(np, "arm,parity-enable")) {
1070 		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
1071 		val |= L2C_AUX_CTRL_PARITY_ENABLE;
1072 	} else if (of_property_read_bool(np, "arm,parity-disable")) {
1073 		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
1074 	}
1075 
1076 	if (of_property_read_bool(np, "arm,shared-override")) {
1077 		mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
1078 		val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
1079 	}
1080 
1081 	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
1082 	if (ret)
1083 		return;
1084 
1085 	if (assoc > 8) {
1086 		pr_err("l2x0 of: cache settings yield too high an associativity\n");
1087 		pr_err("l2x0 of: %d calculated, max 8\n", assoc);
1088 	} else {
1089 		mask |= L2X0_AUX_CTRL_ASSOC_MASK;
1090 		val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT);
1091 	}
1092 
1093 	*aux_val &= ~mask;
1094 	*aux_val |= val;
1095 	*aux_mask &= ~mask;
1096 }
1097 
1098 static const struct l2c_init_data of_l2c210_data __initconst = {
1099 	.type = "L2C-210",
1100 	.way_size_0 = SZ_8K,
1101 	.num_lock = 1,
1102 	.of_parse = l2x0_of_parse,
1103 	.enable = l2c_enable,
1104 	.save = l2c_save,
1105 	.configure = l2c_configure,
1106 	.unlock = l2c_unlock,
1107 	.outer_cache = {
1108 		.inv_range   = l2c210_inv_range,
1109 		.clean_range = l2c210_clean_range,
1110 		.flush_range = l2c210_flush_range,
1111 		.flush_all   = l2c210_flush_all,
1112 		.disable     = l2c_disable,
1113 		.sync        = l2c210_sync,
1114 		.resume      = l2c_resume,
1115 	},
1116 };
1117 
1118 static const struct l2c_init_data of_l2c220_data __initconst = {
1119 	.type = "L2C-220",
1120 	.way_size_0 = SZ_8K,
1121 	.num_lock = 1,
1122 	.of_parse = l2x0_of_parse,
1123 	.enable = l2c220_enable,
1124 	.save = l2c_save,
1125 	.configure = l2c_configure,
1126 	.unlock = l2c220_unlock,
1127 	.outer_cache = {
1128 		.inv_range   = l2c220_inv_range,
1129 		.clean_range = l2c220_clean_range,
1130 		.flush_range = l2c220_flush_range,
1131 		.flush_all   = l2c220_flush_all,
1132 		.disable     = l2c_disable,
1133 		.sync        = l2c220_sync,
1134 		.resume      = l2c_resume,
1135 	},
1136 };
1137 
1138 static void __init l2c310_of_parse(const struct device_node *np,
1139 	u32 *aux_val, u32 *aux_mask)
1140 {
1141 	u32 data[3] = { 0, 0, 0 };
1142 	u32 tag[3] = { 0, 0, 0 };
1143 	u32 filter[2] = { 0, 0 };
1144 	u32 assoc;
1145 	u32 prefetch;
1146 	u32 power;
1147 	u32 val;
1148 	int ret;
1149 
1150 	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
1151 	if (tag[0] && tag[1] && tag[2])
1152 		l2x0_saved_regs.tag_latency =
1153 			L310_LATENCY_CTRL_RD(tag[0] - 1) |
1154 			L310_LATENCY_CTRL_WR(tag[1] - 1) |
1155 			L310_LATENCY_CTRL_SETUP(tag[2] - 1);
1156 
1157 	of_property_read_u32_array(np, "arm,data-latency",
1158 				   data, ARRAY_SIZE(data));
1159 	if (data[0] && data[1] && data[2])
1160 		l2x0_saved_regs.data_latency =
1161 			L310_LATENCY_CTRL_RD(data[0] - 1) |
1162 			L310_LATENCY_CTRL_WR(data[1] - 1) |
1163 			L310_LATENCY_CTRL_SETUP(data[2] - 1);
1164 
1165 	of_property_read_u32_array(np, "arm,filter-ranges",
1166 				   filter, ARRAY_SIZE(filter));
1167 	if (filter[1]) {
1168 		l2x0_saved_regs.filter_end =
1169 					ALIGN(filter[0] + filter[1], SZ_1M);
1170 		l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1))
1171 					| L310_ADDR_FILTER_EN;
1172 	}
1173 
1174 	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
1175 	if (!ret) {
1176 		switch (assoc) {
1177 		case 16:
1178 			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1179 			*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
1180 			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1181 			break;
1182 		case 8:
1183 			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1184 			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1185 			break;
1186 		default:
1187 			pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
1188 			       assoc);
1189 			break;
1190 		}
1191 	}
1192 
1193 	if (of_property_read_bool(np, "arm,shared-override")) {
1194 		*aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
1195 		*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
1196 	}
1197 
1198 	if (of_property_read_bool(np, "arm,parity-enable")) {
1199 		*aux_val |= L2C_AUX_CTRL_PARITY_ENABLE;
1200 		*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
1201 	} else if (of_property_read_bool(np, "arm,parity-disable")) {
1202 		*aux_val &= ~L2C_AUX_CTRL_PARITY_ENABLE;
1203 		*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
1204 	}
1205 
1206 	if (of_property_read_bool(np, "arm,early-bresp-disable"))
1207 		l2x0_bresp_disable = true;
1208 
1209 	if (of_property_read_bool(np, "arm,full-line-zero-disable"))
1210 		l2x0_flz_disable = true;
1211 
1212 	prefetch = l2x0_saved_regs.prefetch_ctrl;
1213 
1214 	ret = of_property_read_u32(np, "arm,double-linefill", &val);
1215 	if (ret == 0) {
1216 		if (val)
1217 			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL;
1218 		else
1219 			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
1220 	} else if (ret != -EINVAL) {
1221 		pr_err("L2C-310 OF arm,double-linefill property value is missing\n");
1222 	}
1223 
1224 	ret = of_property_read_u32(np, "arm,double-linefill-incr", &val);
1225 	if (ret == 0) {
1226 		if (val)
1227 			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
1228 		else
1229 			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
1230 	} else if (ret != -EINVAL) {
1231 		pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n");
1232 	}
1233 
1234 	ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val);
1235 	if (ret == 0) {
1236 		if (!val)
1237 			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
1238 		else
1239 			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
1240 	} else if (ret != -EINVAL) {
1241 		pr_err("L2C-310 OF arm,double-linefill-wrap property value is missing\n");
1242 	}
1243 
1244 	ret = of_property_read_u32(np, "arm,prefetch-drop", &val);
1245 	if (ret == 0) {
1246 		if (val)
1247 			prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP;
1248 		else
1249 			prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP;
1250 	} else if (ret != -EINVAL) {
1251 		pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n");
1252 	}
1253 
1254 	ret = of_property_read_u32(np, "arm,prefetch-offset", &val);
1255 	if (ret == 0) {
1256 		prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
1257 		prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK;
1258 	} else if (ret != -EINVAL) {
1259 		pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
1260 	}
1261 
1262 	ret = of_property_read_u32(np, "prefetch-data", &val);
1263 	if (ret == 0) {
1264 		if (val)
1265 			prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
1266 		else
1267 			prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
1268 	} else if (ret != -EINVAL) {
1269 		pr_err("L2C-310 OF prefetch-data property value is missing\n");
1270 	}
1271 
1272 	ret = of_property_read_u32(np, "prefetch-instr", &val);
1273 	if (ret == 0) {
1274 		if (val)
1275 			prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
1276 		else
1277 			prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
1278 	} else if (ret != -EINVAL) {
1279 		pr_err("L2C-310 OF prefetch-instr property value is missing\n");
1280 	}
1281 
1282 	l2x0_saved_regs.prefetch_ctrl = prefetch;
1283 
1284 	power = l2x0_saved_regs.pwr_ctrl |
1285 		L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN;
1286 
1287 	ret = of_property_read_u32(np, "arm,dynamic-clock-gating", &val);
1288 	if (!ret) {
1289 		if (!val)
1290 			power &= ~L310_DYNAMIC_CLK_GATING_EN;
1291 	} else if (ret != -EINVAL) {
1292 		pr_err("L2C-310 OF dynamic-clock-gating property value is missing or invalid\n");
1293 	}
1294 	ret = of_property_read_u32(np, "arm,standby-mode", &val);
1295 	if (!ret) {
1296 		if (!val)
1297 			power &= ~L310_STNDBY_MODE_EN;
1298 	} else if (ret != -EINVAL) {
1299 		pr_err("L2C-310 OF standby-mode property value is missing or invalid\n");
1300 	}
1301 
1302 	l2x0_saved_regs.pwr_ctrl = power;
1303 }
1304 
1305 static const struct l2c_init_data of_l2c310_data __initconst = {
1306 	.type = "L2C-310",
1307 	.way_size_0 = SZ_8K,
1308 	.num_lock = 8,
1309 	.of_parse = l2c310_of_parse,
1310 	.enable = l2c310_enable,
1311 	.fixup = l2c310_fixup,
1312 	.save  = l2c310_save,
1313 	.configure = l2c310_configure,
1314 	.unlock = l2c310_unlock,
1315 	.outer_cache = {
1316 		.inv_range   = l2c210_inv_range,
1317 		.clean_range = l2c210_clean_range,
1318 		.flush_range = l2c210_flush_range,
1319 		.flush_all   = l2c210_flush_all,
1320 		.disable     = l2c310_disable,
1321 		.sync        = l2c210_sync,
1322 		.resume      = l2c310_resume,
1323 	},
1324 };
1325 
1326 /*
1327  * This is a variant of the of_l2c310_data with .sync set to
1328  * NULL. Outer sync operations are not needed when the system is I/O
1329  * coherent, and potentially harmful in certain situations (PCIe/PL310
1330  * deadlock on Armada 375/38x due to hardware I/O coherency). The
1331  * other operations are kept because they are infrequent (therefore do
1332  * not cause the deadlock in practice) and needed for secondary CPU
1333  * boot and other power management activities.
1334  */
1335 static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
1336 	.type = "L2C-310 Coherent",
1337 	.way_size_0 = SZ_8K,
1338 	.num_lock = 8,
1339 	.of_parse = l2c310_of_parse,
1340 	.enable = l2c310_enable,
1341 	.fixup = l2c310_fixup,
1342 	.save  = l2c310_save,
1343 	.configure = l2c310_configure,
1344 	.unlock = l2c310_unlock,
1345 	.outer_cache = {
1346 		.inv_range   = l2c210_inv_range,
1347 		.clean_range = l2c210_clean_range,
1348 		.flush_range = l2c210_flush_range,
1349 		.flush_all   = l2c210_flush_all,
1350 		.disable     = l2c310_disable,
1351 		.resume      = l2c310_resume,
1352 	},
1353 };
1354 
1355 /*
1356  * Note that the end addresses passed to Linux primitives are
1357  * noninclusive, while the hardware cache range operations use
1358  * inclusive start and end addresses.
1359  */
1360 static unsigned long aurora_range_end(unsigned long start, unsigned long end)
1361 {
1362 	/*
1363 	 * Limit the number of cache lines processed at once,
1364 	 * since cache range operations stall the CPU pipeline
1365 	 * until completion.
1366 	 */
1367 	if (end > start + MAX_RANGE_SIZE)
1368 		end = start + MAX_RANGE_SIZE;
1369 
1370 	/*
1371 	 * Cache range operations can't straddle a page boundary.
1372 	 */
1373 	if (end > PAGE_ALIGN(start+1))
1374 		end = PAGE_ALIGN(start+1);
1375 
1376 	return end;
1377 }
1378 
1379 static void aurora_pa_range(unsigned long start, unsigned long end,
1380 			    unsigned long offset)
1381 {
1382 	void __iomem *base = l2x0_base;
1383 	unsigned long range_end;
1384 	unsigned long flags;
1385 
1386 	/*
1387 	 * round start down and end up to cache line boundaries
1388 	 */
1389 	start &= ~(CACHE_LINE_SIZE - 1);
1390 	end = ALIGN(end, CACHE_LINE_SIZE);
1391 
1392 	/*
1393 	 * perform operation on all full cache lines between 'start' and 'end'
1394 	 */
1395 	while (start < end) {
1396 		range_end = aurora_range_end(start, end);
1397 
1398 		raw_spin_lock_irqsave(&l2x0_lock, flags);
1399 		writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
1400 		writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset);
1401 		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
1402 
1403 		writel_relaxed(0, base + AURORA_SYNC_REG);
1404 		start = range_end;
1405 	}
1406 }
1407 static void aurora_inv_range(unsigned long start, unsigned long end)
1408 {
1409 	aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
1410 }
1411 
1412 static void aurora_clean_range(unsigned long start, unsigned long end)
1413 {
1414 	/*
1415 	 * If L2 is forced to WT, the L2 will always be clean and we
1416 	 * don't need to do anything here.
1417 	 */
1418 	if (!l2_wt_override)
1419 		aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG);
1420 }
1421 
1422 static void aurora_flush_range(unsigned long start, unsigned long end)
1423 {
1424 	if (l2_wt_override)
1425 		aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
1426 	else
1427 		aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG);
1428 }
1429 
1430 static void aurora_flush_all(void)
1431 {
1432 	void __iomem *base = l2x0_base;
1433 	unsigned long flags;
1434 
1435 	/* clean and invalidate all ways */
1436 	raw_spin_lock_irqsave(&l2x0_lock, flags);
1437 	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
1438 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
1439 
1440 	writel_relaxed(0, base + AURORA_SYNC_REG);
1441 }
1442 
1443 static void aurora_cache_sync(void)
1444 {
1445 	writel_relaxed(0, l2x0_base + AURORA_SYNC_REG);
1446 }
1447 
1448 static void aurora_disable(void)
1449 {
1450 	void __iomem *base = l2x0_base;
1451 	unsigned long flags;
1452 
1453 	raw_spin_lock_irqsave(&l2x0_lock, flags);
1454 	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
1455 	writel_relaxed(0, base + AURORA_SYNC_REG);
1456 	l2c_write_sec(0, base, L2X0_CTRL);
1457 	dsb(st);
1458 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
1459 }
1460 
1461 static void aurora_save(void __iomem *base)
1462 {
1463 	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
1464 	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
1465 }
1466 
1467 /*
1468  * For the Aurora cache in no-outer mode, enable broadcasting of L1 cache
1469  * maintenance operations to the L2 via the CP15 coprocessor.
1470  */
1471 static void __init aurora_enable_no_outer(void __iomem *base,
1472 	unsigned num_lock)
1473 {
1474 	u32 u;
1475 
1476 	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
1477 	u |= AURORA_CTRL_FW;		/* Set the FW bit */
1478 	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));
1479 
1480 	isb();
1481 
1482 	l2c_enable(base, num_lock);
1483 }
1484 
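/*
 * Aurora has its own sync register rather than L2X0_CACHE_SYNC, so point
 * the shared sync_reg_offset at it; l2c_enable() writes and polls
 * base + sync_reg_offset when the cache is enabled.
 */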
1485 static void __init aurora_fixup(void __iomem *base, u32 cache_id,
1486 	struct outer_cache_fns *fns)
1487 {
1488 	sync_reg_offset = AURORA_SYNC_REG;
1489 }
1490 
1491 static void __init aurora_of_parse(const struct device_node *np,
1492 				u32 *aux_val, u32 *aux_mask)
1493 {
1494 	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
1495 	u32 mask =  AURORA_ACR_REPLACEMENT_MASK;
1496 
1497 	of_property_read_u32(np, "cache-id-part",
1498 			&cache_id_part_number_from_dt);
1499 
1500 	/* Determine and save the write policy */
1501 	l2_wt_override = of_property_read_bool(np, "wt-override");
1502 
1503 	if (l2_wt_override) {
1504 		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
1505 		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
1506 	}
1507 
1508 	*aux_val &= ~mask;
1509 	*aux_val |= val;
1510 	*aux_mask &= ~mask;
1511 }
1512 
1513 static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
1514 	.type = "Aurora",
1515 	.way_size_0 = SZ_4K,
1516 	.num_lock = 4,
1517 	.of_parse = aurora_of_parse,
1518 	.enable = l2c_enable,
1519 	.fixup = aurora_fixup,
1520 	.save  = aurora_save,
1521 	.configure = l2c_configure,
1522 	.unlock = l2c_unlock,
1523 	.outer_cache = {
1524 		.inv_range   = aurora_inv_range,
1525 		.clean_range = aurora_clean_range,
1526 		.flush_range = aurora_flush_range,
1527 		.flush_all   = aurora_flush_all,
1528 		.disable     = aurora_disable,
1529 		.sync	     = aurora_cache_sync,
1530 		.resume      = l2c_resume,
1531 	},
1532 };
1533 
1534 static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
1535 	.type = "Aurora",
1536 	.way_size_0 = SZ_4K,
1537 	.num_lock = 4,
1538 	.of_parse = aurora_of_parse,
1539 	.enable = aurora_enable_no_outer,
1540 	.fixup = aurora_fixup,
1541 	.save  = aurora_save,
1542 	.configure = l2c_configure,
1543 	.unlock = l2c_unlock,
1544 	.outer_cache = {
1545 		.resume      = l2c_resume,
1546 	},
1547 };
1548 
1549 /*
1550  * For certain Broadcom SoCs, depending on the address range, different offsets
1551  * need to be added to the address before passing it to L2 for
1552  * invalidation/clean/flush
1553  *
1554  * Section Address Range              Offset        EMI
1555  *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
1556  *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
1557  *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
1558  *
1559  * When the start and end addresses have crossed two different sections, we
1560  * need to break the L2 operation into two, each within its own section.
1561  * For example, if we need to invalidate a range that starts at 0xBFFF0000 and
1562  * ends at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and 2)
1563  * 0xC0000000 - 0xC0001000
1564  *
1565  * Note 1:
1566  * By breaking a single L2 operation into two, we may suffer a small
1567  * performance hit, but keep in mind that the cross-section case is very rare
1568  *
1569  * Note 2:
1570  * We do not need to handle the case when the start address is in
1571  * Section 1 and the end address is in Section 3, since it is not a valid use
1572  * case
1573  *
1574  * Note 3:
1575  * In practical terms, Section 1 can no longer be used on rev A2, so the
1576  * code does not need to handle section 1 at all.
1577  *
1578  */
1579 #define BCM_SYS_EMI_START_ADDR        0x40000000UL
1580 #define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL
1581 
1582 #define BCM_SYS_EMI_OFFSET            0x40000000UL
1583 #define BCM_VC_EMI_OFFSET             0x80000000UL
1584 
1585 static inline int bcm_addr_is_sys_emi(unsigned long addr)
1586 {
1587 	return (addr >= BCM_SYS_EMI_START_ADDR) &&
1588 		(addr < BCM_VC_EMI_SEC3_START_ADDR);
1589 }
1590 
1591 static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
1592 {
1593 	if (bcm_addr_is_sys_emi(addr))
1594 		return addr + BCM_SYS_EMI_OFFSET;
1595 	else
1596 		return addr + BCM_VC_EMI_OFFSET;
1597 }
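
/*
 * Illustration of the table above: a flush of 0xBFFF0000 - 0xC0001000
 * crosses from section 2 into section 3, so it is issued as
 * 0xBFFF0000 - 0xBFFFFFFF (SYS EMI, remapped by bcm_l2_phys_addr() with
 * the +0x40000000 offset) followed by 0xC0000000 - 0xC0001000 (VC EMI,
 * +0x80000000 offset).
 */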
1598 
1599 static void bcm_inv_range(unsigned long start, unsigned long end)
1600 {
1601 	unsigned long new_start, new_end;
1602 
1603 	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1604 
1605 	if (unlikely(end <= start))
1606 		return;
1607 
1608 	new_start = bcm_l2_phys_addr(start);
1609 	new_end = bcm_l2_phys_addr(end);
1610 
1611 	/* normal case, no cross section between start and end */
1612 	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
1613 		l2c210_inv_range(new_start, new_end);
1614 		return;
1615 	}
1616 
1617 	/* They cross sections, so it can only be a cross from section
1618 	 * 2 to section 3
1619 	 */
1620 	l2c210_inv_range(new_start,
1621 		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
1622 	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
1623 		new_end);
1624 }
1625 
1626 static void bcm_clean_range(unsigned long start, unsigned long end)
1627 {
1628 	unsigned long new_start, new_end;
1629 
1630 	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1631 
1632 	if (unlikely(end <= start))
1633 		return;
1634 
1635 	new_start = bcm_l2_phys_addr(start);
1636 	new_end = bcm_l2_phys_addr(end);
1637 
1638 	/* normal case, no cross section between start and end */
1639 	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
1640 		l2c210_clean_range(new_start, new_end);
1641 		return;
1642 	}
1643 
1644 	/* They cross sections, so it can only be a cross from section
1645 	 * 2 to section 3
1646 	 */
1647 	l2c210_clean_range(new_start,
1648 		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
1649 	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
1650 		new_end);
1651 }
1652 
1653 static void bcm_flush_range(unsigned long start, unsigned long end)
1654 {
1655 	unsigned long new_start, new_end;
1656 
1657 	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1658 
1659 	if (unlikely(end <= start))
1660 		return;
1661 
1662 	if ((end - start) >= l2x0_size) {
1663 		outer_cache.flush_all();
1664 		return;
1665 	}
1666 
1667 	new_start = bcm_l2_phys_addr(start);
1668 	new_end = bcm_l2_phys_addr(end);
1669 
1670 	/* normal case, no cross section between start and end */
1671 	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
1672 		l2c210_flush_range(new_start, new_end);
1673 		return;
1674 	}
1675 
1676 	/* They cross sections, so it can only be a cross from section
1677 	 * 2 to section 3
1678 	 */
1679 	l2c210_flush_range(new_start,
1680 		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
1681 	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
1682 		new_end);
1683 }
1684 
1685 /* Broadcom L2C-310 parts are based on ARM's r3p2 or later and require no fixups */
1686 static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
1687 	.type = "BCM-L2C-310",
1688 	.way_size_0 = SZ_8K,
1689 	.num_lock = 8,
1690 	.of_parse = l2c310_of_parse,
1691 	.enable = l2c310_enable,
1692 	.save  = l2c310_save,
1693 	.configure = l2c310_configure,
1694 	.unlock = l2c310_unlock,
1695 	.outer_cache = {
1696 		.inv_range   = bcm_inv_range,
1697 		.clean_range = bcm_clean_range,
1698 		.flush_range = bcm_flush_range,
1699 		.flush_all   = l2c210_flush_all,
1700 		.disable     = l2c310_disable,
1701 		.sync        = l2c210_sync,
1702 		.resume      = l2c310_resume,
1703 	},
1704 };
1705 
1706 static void __init tauros3_save(void __iomem *base)
1707 {
1708 	l2c_save(base);
1709 
1710 	l2x0_saved_regs.aux2_ctrl =
1711 		readl_relaxed(base + TAUROS3_AUX2_CTRL);
1712 	l2x0_saved_regs.prefetch_ctrl =
1713 		readl_relaxed(base + L310_PREFETCH_CTRL);
1714 }
1715 
1716 static void tauros3_configure(void __iomem *base)
1717 {
1718 	l2c_configure(base);
1719 	writel_relaxed(l2x0_saved_regs.aux2_ctrl,
1720 		       base + TAUROS3_AUX2_CTRL);
1721 	writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
1722 		       base + L310_PREFETCH_CTRL);
1723 }
1724 
1725 static const struct l2c_init_data of_tauros3_data __initconst = {
1726 	.type = "Tauros3",
1727 	.way_size_0 = SZ_8K,
1728 	.num_lock = 8,
1729 	.enable = l2c_enable,
1730 	.save  = tauros3_save,
1731 	.configure = tauros3_configure,
1732 	.unlock = l2c_unlock,
1733 	/* Tauros3 broadcasts L1 cache operations to L2 */
1734 	.outer_cache = {
1735 		.resume      = l2c_resume,
1736 	},
1737 };
1738 
1739 #define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
1740 static const struct of_device_id l2x0_ids[] __initconst = {
1741 	L2C_ID("arm,l210-cache", of_l2c210_data),
1742 	L2C_ID("arm,l220-cache", of_l2c220_data),
1743 	L2C_ID("arm,pl310-cache", of_l2c310_data),
1744 	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
1745 	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
1746 	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
1747 	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
1748 	/* Deprecated IDs */
1749 	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
1750 	{}
1751 };
1752 
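/*
 * DT entry point: locate a matching L2 cache node, map its registers,
 * parse the optional tuning properties while the controller is still
 * disabled, and hand over to __l2c_init() with the resulting aux
 * value/mask.
 */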
1753 int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
1754 {
1755 	const struct l2c_init_data *data;
1756 	struct device_node *np;
1757 	struct resource res;
1758 	u32 cache_id, old_aux;
1759 	u32 cache_level = 2;
1760 	bool nosync = false;
1761 
1762 	np = of_find_matching_node(NULL, l2x0_ids);
1763 	if (!np)
1764 		return -ENODEV;
1765 
1766 	if (of_address_to_resource(np, 0, &res))
1767 		return -ENODEV;
1768 
1769 	l2x0_base = ioremap(res.start, resource_size(&res));
1770 	if (!l2x0_base)
1771 		return -ENOMEM;
1772 
1773 	l2x0_saved_regs.phy_base = res.start;
1774 
1775 	data = of_match_node(l2x0_ids, np)->data;
1776 
1777 	if (of_device_is_compatible(np, "arm,pl310-cache") &&
1778 	    of_property_read_bool(np, "arm,io-coherent"))
1779 		data = &of_l2c310_coherent_data;
1780 
1781 	old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
1782 	if (old_aux != ((old_aux & aux_mask) | aux_val)) {
1783 		pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
1784 		        old_aux, (old_aux & aux_mask) | aux_val);
1785 	} else if (aux_mask != ~0U && aux_val != 0) {
1786 		pr_alert("L2C: platform provided aux values match the hardware, so have no effect.  Please remove them.\n");
1787 	}
1788 
1789 	/* All L2 caches are unified, so this property should be specified */
1790 	if (!of_property_read_bool(np, "cache-unified"))
1791 		pr_err("L2C: device tree does not specify a unified cache\n");
1792 
1793 	if (of_property_read_u32(np, "cache-level", &cache_level))
1794 		pr_err("L2C: device tree does not specify cache-level\n");
1795 
1796 	if (cache_level != 2)
1797 		pr_err("L2C: device tree specifies invalid cache level\n");
1798 
1799 	nosync = of_property_read_bool(np, "arm,outer-sync-disable");
1800 
1801 	/* Read back current (default) hardware configuration */
1802 	if (data->save)
1803 		data->save(l2x0_base);
1804 
1805 	/* L2 configuration can only be changed if the cache is disabled */
1806 	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
1807 		if (data->of_parse)
1808 			data->of_parse(np, &aux_val, &aux_mask);
1809 
1810 	if (cache_id_part_number_from_dt)
1811 		cache_id = cache_id_part_number_from_dt;
1812 	else
1813 		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
1814 
1815 	return __l2c_init(data, aux_val, aux_mask, cache_id, nosync);
1816 }
1817 #endif
1818