// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mm/cache-l2x0.c - L210/L220/L310 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/string_choices.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-aurora-l2.h>
#include "cache-tauros3.h"

struct l2c_init_data {
	const char *type;
	unsigned way_size_0;
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	void (*configure)(void __iomem *);
	void (*unlock)(void __iomem *, unsigned);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static const struct l2c_init_data *l2x0_data;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

static bool l2x0_bresp_disable;
static bool l2x0_flz_disable;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * By default, we write directly to secure registers.  Platforms must
 * override this if they are running non-secure.
 */
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
	if (val == readl_relaxed(base + reg))
		return;
	if (outer_cache.write_sec)
		outer_cache.write_sec(val, reg);
	else
		writel_relaxed(val, base + reg);
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

static void l2c_configure(void __iomem *base)
{
	l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
}

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, unsigned num_lock)
{
	unsigned long flags;

	if (outer_cache.configure)
		outer_cache.configure(&l2x0_saved_regs);
	else
		l2x0_data->configure(base);

	l2x0_data->unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	l2x0_pmu_suspend();

	outer_cache.flush_all();
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
}

static void l2c_save(void __iomem *base)
{
	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}

static void l2c_resume(void)
{
	void __iomem *base = l2x0_base;

	/* Do not touch the controller if already enabled. */
	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_data->num_lock);

	l2x0_pmu_resume();
}

/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (unspecified whether this causes an error.)  Thankfully, not
 * used on SMP.
 *
 * The sync register is always L2X0_CACHE_SYNC, but we use
 * sync_reg_offset here so we can share some of this with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}

static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}

static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c_resume,
	},
};

/*
 * L2C-220 specific code.
 *
 * All operations are background operations: they have to be waited for.
 * Conflicting requests generate a slave error (which will cause an
 * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
 * sync register here.
 *
 * However, we can re-use the l2c_resume call.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}

static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}

static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
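	/* The last queued line operation has drained; issue the final sync. */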
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}

static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_enable(void __iomem *base, unsigned num_lock)
{
	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	l2x0_saved_regs.aux_ctrl |= L220_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, num_lock);
}

static void l2c220_unlock(void __iomem *base, unsigned num_lock)
{
	if (readl_relaxed(base + L2X0_AUX_CTRL) & L220_AUX_CTRL_NS_LOCKDOWN)
		l2c_unlock(base, num_lock);
}

static const struct l2c_init_data l2c220_data = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c220_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c220_unlock,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
 *	Affects: 8x64-bit (double fill) line fetches
 *	double fill line fetches can fail to cause dirty data to be evicted
 *	from the cache before the new data overwrites the second line.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2c_save(base);

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L310_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L310_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L310_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is a prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L310_PREFETCH_CTRL);

	/* From r3p0, there is a power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
			L310_POWER_CTRL);
}

static void l2c310_configure(void __iomem *base)
{
	unsigned revision;

	l2c_configure(base);

	/* restore pl310 setup */
	l2c_write_sec(l2x0_saved_regs.tag_latency, base,
		      L310_TAG_LATENCY_CTRL);
	l2c_write_sec(l2x0_saved_regs.data_latency, base,
		      L310_DATA_LATENCY_CTRL);
	l2c_write_sec(l2x0_saved_regs.filter_end, base,
		      L310_ADDR_FILTER_END);
	l2c_write_sec(l2x0_saved_regs.filter_start, base,
		      L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
			      L310_PREFETCH_CTRL);
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
			      L310_POWER_CTRL);
}

static int l2c310_starting_cpu(unsigned int cpu)
{
	set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
	return 0;
}

static int l2c310_dying_cpu(unsigned int cpu)
{
	set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
	return 0;
}

static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
{
	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
	bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
	u32 aux = l2x0_saved_regs.aux_ctrl;

	if (rev >= L310_CACHE_ID_RTL_R2P0) {
		if (cortex_a9 && !l2x0_bresp_disable) {
			aux |= L310_AUX_CTRL_EARLY_BRESP;
			pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
		} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
			pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
			aux &= ~L310_AUX_CTRL_EARLY_BRESP;
		}
	}

	if (cortex_a9 && !l2x0_flz_disable) {
		u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
		u32 acr = get_auxcr();

		pr_debug("Cortex-A9 ACR=0x%08x\n", acr);

		if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
			pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");

		if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
			pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");

		if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
			aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
			pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
		}
	} else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
		pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
		aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
	}

	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	l2x0_saved_regs.aux_ctrl = aux | L310_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, num_lock);

	/* Read back resulting AUX_CTRL value as it could have been altered. */
	aux = readl_relaxed(base + L2X0_AUX_CTRL);

	if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
		u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);

		pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
			aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
			aux & L310_AUX_CTRL_DATA_PREFETCH ?
"D" : "", 663 1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK)); 664 } 665 666 /* r3p0 or later has power control register */ 667 if (rev >= L310_CACHE_ID_RTL_R3P0) { 668 u32 power_ctrl; 669 670 power_ctrl = readl_relaxed(base + L310_POWER_CTRL); 671 pr_info("L2C-310 dynamic clock gating %s, standby mode %s\n", 672 str_enabled_disabled(power_ctrl & L310_DYNAMIC_CLK_GATING_EN), 673 str_enabled_disabled(power_ctrl & L310_STNDBY_MODE_EN)); 674 } 675 676 if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) 677 cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING, 678 "arm/l2x0:starting", l2c310_starting_cpu, 679 l2c310_dying_cpu); 680 } 681 682 static void __init l2c310_fixup(void __iomem *base, u32 cache_id, 683 struct outer_cache_fns *fns) 684 { 685 unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK; 686 const char *errata[8]; 687 unsigned n = 0; 688 689 if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) && 690 revision < L310_CACHE_ID_RTL_R2P0 && 691 /* For bcm compatibility */ 692 fns->inv_range == l2c210_inv_range) { 693 fns->inv_range = l2c310_inv_range_erratum; 694 fns->flush_range = l2c310_flush_range_erratum; 695 errata[n++] = "588369"; 696 } 697 698 if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) && 699 revision >= L310_CACHE_ID_RTL_R2P0 && 700 revision < L310_CACHE_ID_RTL_R3P1) { 701 fns->flush_all = l2c310_flush_all_erratum; 702 errata[n++] = "727915"; 703 } 704 705 if (revision >= L310_CACHE_ID_RTL_R3P0 && 706 revision < L310_CACHE_ID_RTL_R3P2) { 707 u32 val = l2x0_saved_regs.prefetch_ctrl; 708 if (val & L310_PREFETCH_CTRL_DBL_LINEFILL) { 709 val &= ~L310_PREFETCH_CTRL_DBL_LINEFILL; 710 l2x0_saved_regs.prefetch_ctrl = val; 711 errata[n++] = "752271"; 712 } 713 } 714 715 if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) && 716 revision == L310_CACHE_ID_RTL_R3P0) { 717 sync_reg_offset = L2X0_DUMMY_REG; 718 errata[n++] = "753970"; 719 } 720 721 if (IS_ENABLED(CONFIG_PL310_ERRATA_769419)) 722 errata[n++] = "769419"; 723 724 if (n) { 725 unsigned i; 726 727 pr_info("L2C-310 errat%s", n > 1 ? "a" : "um"); 728 for (i = 0; i < n; i++) 729 pr_cont(" %s", errata[i]); 730 pr_cont(" enabled\n"); 731 } 732 } 733 734 static void l2c310_disable(void) 735 { 736 /* 737 * If full-line-of-zeros is enabled, we must first disable it in the 738 * Cortex-A9 auxiliary control register before disabling the L2 cache. 
739 */ 740 if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO) 741 set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1))); 742 743 l2c_disable(); 744 } 745 746 static void l2c310_resume(void) 747 { 748 l2c_resume(); 749 750 /* Re-enable full-line-of-zeros for Cortex-A9 */ 751 if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO) 752 set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); 753 } 754 755 static void l2c310_unlock(void __iomem *base, unsigned num_lock) 756 { 757 if (readl_relaxed(base + L2X0_AUX_CTRL) & L310_AUX_CTRL_NS_LOCKDOWN) 758 l2c_unlock(base, num_lock); 759 } 760 761 static const struct l2c_init_data l2c310_init_fns __initconst = { 762 .type = "L2C-310", 763 .way_size_0 = SZ_8K, 764 .num_lock = 8, 765 .enable = l2c310_enable, 766 .fixup = l2c310_fixup, 767 .save = l2c310_save, 768 .configure = l2c310_configure, 769 .unlock = l2c310_unlock, 770 .outer_cache = { 771 .inv_range = l2c210_inv_range, 772 .clean_range = l2c210_clean_range, 773 .flush_range = l2c210_flush_range, 774 .flush_all = l2c210_flush_all, 775 .disable = l2c310_disable, 776 .sync = l2c210_sync, 777 .resume = l2c310_resume, 778 }, 779 }; 780 781 static int __init __l2c_init(const struct l2c_init_data *data, 782 u32 aux_val, u32 aux_mask, u32 cache_id, bool nosync) 783 { 784 struct outer_cache_fns fns; 785 unsigned way_size_bits, ways; 786 u32 aux, old_aux; 787 788 /* 789 * Save the pointer globally so that callbacks which do not receive 790 * context from callers can access the structure. 791 */ 792 l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL); 793 if (!l2x0_data) 794 return -ENOMEM; 795 796 /* 797 * Sanity check the aux values. aux_mask is the bits we preserve 798 * from reading the hardware register, and aux_val is the bits we 799 * set. 800 */ 801 if (aux_val & aux_mask) 802 pr_alert("L2C: platform provided aux values permit register corruption.\n"); 803 804 old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 805 aux &= aux_mask; 806 aux |= aux_val; 807 808 if (old_aux != aux) 809 pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n", 810 old_aux, aux); 811 812 /* Determine the number of ways */ 813 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 814 case L2X0_CACHE_ID_PART_L310: 815 if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16)) 816 pr_warn("L2C: DT/platform tries to modify or specify cache size\n"); 817 if (aux & (1 << 16)) 818 ways = 16; 819 else 820 ways = 8; 821 break; 822 823 case L2X0_CACHE_ID_PART_L210: 824 case L2X0_CACHE_ID_PART_L220: 825 ways = (aux >> 13) & 0xf; 826 break; 827 828 case AURORA_CACHE_ID: 829 ways = (aux >> 13) & 0xf; 830 ways = 2 << ((ways + 1) >> 2); 831 break; 832 833 default: 834 /* Assume unknown chips have 8 ways */ 835 ways = 8; 836 break; 837 } 838 839 l2x0_way_mask = (1 << ways) - 1; 840 841 /* 842 * way_size_0 is the size that a way_size value of zero would be 843 * given the calculation: way_size = way_size_0 << way_size_bits. 844 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k, 845 * then way_size_0 would be 8k. 846 * 847 * L2 cache size = number of ways * way size. 
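	 *
	 * For example, a 16-way L2C-310 with way_size_bits = 3 has a way
	 * size of 8K << 3 = 64K, giving an L2 cache size of 16 * 64K = 1MB.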
	 */
	way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
			L2C_AUX_CTRL_WAY_SIZE_SHIFT;
	l2x0_size = ways * (data->way_size_0 << way_size_bits);

	fns = data->outer_cache;
	fns.write_sec = outer_cache.write_sec;
	fns.configure = outer_cache.configure;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);
	if (nosync) {
		pr_info("L2C: disabling outer sync\n");
		fns.sync = NULL;
	}

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		l2x0_saved_regs.aux_ctrl = aux;

		data->enable(l2x0_base, data->num_lock);
	}

	outer_cache = fns;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		data->type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		data->type, cache_id, aux);

	l2x0_pmu_register(l2x0_base, cache_id);

	return 0;
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	/* Read back current (default) hardware configuration */
	if (data->save)
		data->save(l2x0_base);

	__l2c_init(data, aux_val, aux_mask, cache_id, false);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

/**
 * l2x0_cache_size_of_parse() - read cache size parameters from DT
 * @np: the device tree node for the l2 cache
 * @aux_val: pointer to machine-supplied auxiliary register value, to
 * be augmented by the call (bits to be set to 1)
 * @aux_mask: pointer to machine-supplied auxiliary register mask, to
 * be augmented by the call (bits to be set to 0)
 * @associativity: variable to return the calculated associativity in
 * @max_way_size: the maximum size in bytes for the cache ways
 */
static int __init l2x0_cache_size_of_parse(const struct device_node *np,
					   u32 *aux_val, u32 *aux_mask,
					   u32 *associativity,
					   u32 max_way_size)
{
	u32 mask = 0, val = 0;
	u32 cache_size = 0, sets = 0;
	u32 way_size_bits = 1;
	u32 way_size = 0;
	u32 block_size = 0;
	u32 line_size = 0;

	of_property_read_u32(np, "cache-size", &cache_size);
	of_property_read_u32(np, "cache-sets", &sets);
	of_property_read_u32(np, "cache-block-size", &block_size);
	of_property_read_u32(np, "cache-line-size", &line_size);

	if (!cache_size || !sets)
		return -ENODEV;

	/* All these l2 caches have the same line = block size actually */
	if (!line_size) {
		if (block_size) {
			/* If linesize is not given, it is equal
			 * to blocksize */
			line_size = block_size;
		} else {
			/* Fall back to known size */
			pr_warn("L2C OF: no cache block/line size given: "
				"falling back to default size %d bytes\n",
				CACHE_LINE_SIZE);
			line_size = CACHE_LINE_SIZE;
		}
	}

	if (line_size != CACHE_LINE_SIZE)
		pr_warn("L2C OF: DT supplied line size %d bytes does "
			"not match hardware line size of %d bytes\n",
			line_size,
			CACHE_LINE_SIZE);

	/*
	 * Since:
	 * set size = cache size / sets
	 * ways = cache size / (sets * line size)
	 * way size = cache size / (cache size / (sets * line size))
	 * way size = sets * line size
	 * associativity = ways = cache size / way size
	 */
	way_size = sets * line_size;
	*associativity = cache_size / way_size;

	if (way_size > max_way_size) {
		pr_err("L2C OF: set size %dKB is too large\n", way_size);
		return -EINVAL;
	}

	pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
		cache_size, cache_size >> 10);
	pr_info("L2C OF: override line size: %d bytes\n", line_size);
	pr_info("L2C OF: override way size: %d bytes (%dKB)\n",
		way_size, way_size >> 10);
	pr_info("L2C OF: override associativity: %d\n", *associativity);

	/*
	 * Calculates the bits 17:19 to set for way size:
	 * 512KB -> 6, 256KB -> 5, ... 16KB -> 1
	 */
	way_size_bits = ilog2(way_size >> 10) - 3;
	if (way_size_bits < 1 || way_size_bits > 6) {
		pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
		       way_size);
		return -EINVAL;
	}

	mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
	val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT);

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;

	return 0;
}

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;
	u32 assoc;
	int ret;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	if (of_property_read_bool(np, "arm,parity-enable")) {
		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
		val |= L2C_AUX_CTRL_PARITY_ENABLE;
	} else if (of_property_read_bool(np, "arm,parity-disable")) {
		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
	}

	if (of_property_read_bool(np, "arm,shared-override")) {
		mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
		val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
	}

	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
	if (ret)
		return;

	if (assoc > 8) {
		pr_err("l2x0 of: cache setting yields too high associativity\n");
		pr_err("l2x0 of: %d calculated, max 8\n", assoc);
	} else {
		mask |= L2X0_AUX_CTRL_ASSOC_MASK;
		val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT);
	}
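
	/*
	 * Bits accumulated in 'mask' are forced to the state given by 'val':
	 * clearing them in *aux_mask discards the hardware defaults, and
	 * setting them in *aux_val makes __l2c_init() apply our values.
	 */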

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c_resume,
	},
};

static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c220_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c220_unlock,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c_resume,
	},
};

static void __init l2c310_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };
	u32 assoc;
	u32 prefetch;
	u32 power;
	u32 val;
	int ret;

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		l2x0_saved_regs.tag_latency =
			L310_LATENCY_CTRL_RD(tag[0] - 1) |
			L310_LATENCY_CTRL_WR(tag[1] - 1) |
			L310_LATENCY_CTRL_SETUP(tag[2] - 1);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		l2x0_saved_regs.data_latency =
			L310_LATENCY_CTRL_RD(data[0] - 1) |
			L310_LATENCY_CTRL_WR(data[1] - 1) |
			L310_LATENCY_CTRL_SETUP(data[2] - 1);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		l2x0_saved_regs.filter_end =
			ALIGN(filter[0] + filter[1], SZ_1M);
		l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1))
				| L310_ADDR_FILTER_EN;
	}

	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
	if (!ret) {
		switch (assoc) {
		case 16:
			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			break;
		case 8:
			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			break;
		default:
			pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
			       assoc);
			break;
		}
	}

	if (of_property_read_bool(np, "arm,shared-override")) {
		*aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
		*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
	}

	if (of_property_read_bool(np, "arm,parity-enable")) {
		*aux_val |= L2C_AUX_CTRL_PARITY_ENABLE;
		*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
	} else if (of_property_read_bool(np, "arm,parity-disable")) {
		*aux_val &= ~L2C_AUX_CTRL_PARITY_ENABLE;
		*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
	}

	if (of_property_read_bool(np, "arm,early-bresp-disable"))
		l2x0_bresp_disable = true;

	if (of_property_read_bool(np, "arm,full-line-zero-disable"))
		l2x0_flz_disable = true;

	prefetch = l2x0_saved_regs.prefetch_ctrl;

	ret = of_property_read_u32(np, "arm,double-linefill", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,double-linefill-incr", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val);
	if (ret == 0) {
		if (!val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill-wrap property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,prefetch-drop", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP;
		else
			prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,prefetch-offset", &val);
	if (ret == 0) {
		prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
		prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
	}

	ret = of_property_read_u32(np, "prefetch-data", &val);
	if (ret == 0) {
		if (val) {
			prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
			*aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH;
		} else {
			prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
			*aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
		}
		*aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF prefetch-data property value is missing\n");
	}

	ret = of_property_read_u32(np, "prefetch-instr", &val);
	if (ret == 0) {
		if (val) {
			prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
			*aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
		} else {
			prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
			*aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
		}
		*aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF prefetch-instr property value is missing\n");
	}

	l2x0_saved_regs.prefetch_ctrl = prefetch;

	power = l2x0_saved_regs.pwr_ctrl |
		L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN;

	ret = of_property_read_u32(np, "arm,dynamic-clock-gating", &val);
	if (!ret) {
		if (!val)
			power &= ~L310_DYNAMIC_CLK_GATING_EN;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF dynamic-clock-gating property value is missing or invalid\n");
	}
	ret = of_property_read_u32(np, "arm,standby-mode", &val);
	if (!ret) {
		if (!val)
			power &= ~L310_STNDBY_MODE_EN;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF standby-mode property value is missing or invalid\n");
	}

	l2x0_saved_regs.pwr_ctrl = power;
}
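
/*
 * A minimal, illustrative device tree node using some of the properties
 * parsed above and in l2x0_of_init().  The values are hypothetical and
 * board specific; they are not recommendations.
 *
 *	l2: cache-controller@f8f02000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xf8f02000 0x1000>;
 *		cache-unified;
 *		cache-level = <2>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <2 2 2>;
 *		cache-size = <0x80000>;
 *		cache-sets = <2048>;
 *		prefetch-data = <1>;
 *		prefetch-instr = <1>;
 *	};
 */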

static const struct l2c_init_data of_l2c310_data __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

/*
 * This is a variant of the of_l2c310_data with .sync set to
 * NULL.  Outer sync operations are not needed when the system is I/O
 * coherent, and potentially harmful in certain situations (PCIe/PL310
 * deadlock on Armada 375/38x due to hardware I/O coherency).  The
 * other operations are kept because they are infrequent (therefore do
 * not cause the deadlock in practice) and needed for secondary CPU
 * boot and other power management activities.
 */
static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
	.type = "L2C-310 Coherent",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.resume = l2c310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long aurora_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + AURORA_MAX_RANGE_SIZE)
		end = start + AURORA_MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}

static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	void __iomem *base = l2x0_base;
	unsigned long range_end;
	unsigned long flags;

	/*
	 * Align the start and end addresses to the cache line size.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * perform operation on all full cache lines between 'start' and 'end'
	 */
	while (start < end) {
		range_end = aurora_range_end(start, end);

		raw_spin_lock_irqsave(&l2x0_lock, flags);
		writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
		writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);

		writel_relaxed(0, base + AURORA_SYNC_REG);
		start = range_end;
	}
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override)
		aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG);
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	if (l2_wt_override)
		aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
	else
		aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG);
}

static void aurora_flush_all(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	writel_relaxed(0, base + AURORA_SYNC_REG);
}

static void aurora_cache_sync(void)
{
	writel_relaxed(0, l2x0_base + AURORA_SYNC_REG);
}

static void aurora_disable(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	writel_relaxed(0, base + AURORA_SYNC_REG);
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			     &cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	if (of_property_read_bool(np, "marvell,ecc-enable")) {
		mask |= AURORA_ACR_ECC_EN;
		val |= AURORA_ACR_ECC_EN;
	}

	if (of_property_read_bool(np, "arm,parity-enable")) {
		mask |= AURORA_ACR_PARITY_EN;
		val |= AURORA_ACR_PARITY_EN;
	} else if (of_property_read_bool(np, "arm,parity-disable")) {
		mask |= AURORA_ACR_PARITY_EN;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = aurora_flush_all,
		.disable = aurora_disable,
		.sync = aurora_cache_sync,
		.resume = l2c_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.resume = l2c_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different offsets
 * need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section	Address Range			Offset		EMI
 *   1		0x00000000 - 0x3FFFFFFF		0x80000000	VC
 *   2		0x40000000 - 0xBFFFFFFF		0x40000000	SYS
 *   3		0xC0000000 - 0xFFFFFFFF		0x80000000	VC
 *
 * When the start and end addresses have crossed two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate a range that starts at 0xBFFF0000
 * and ends at 0xC0001000, we need to issue two invalidates: one for
 * 0xBFFF0000 - 0xBFFFFFFF and another for 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because of
 * that the code does not need to handle section 1 at all.
 *
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		outer_cache.flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

/* Broadcom L2C-310s are based on ARM's r3p2 or later and require no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2c_save(base);

	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L310_PREFETCH_CTRL);
}

static void tauros3_configure(void __iomem *base)
{
	l2c_configure(base);
	writel_relaxed(l2x0_saved_regs.aux2_ctrl,
		       base + TAUROS3_AUX2_CTRL);
	writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
		       base + L310_PREFETCH_CTRL);
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	.configure = tauros3_configure,
	.unlock = l2c_unlock,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = l2c_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id, old_aux;
	u32 cache_level = 2;
	bool nosync = false;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	if (of_device_is_compatible(np, "arm,pl310-cache") &&
	    of_property_read_bool(np, "arm,io-coherent"))
		data = &of_l2c310_coherent_data;

	old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	if (old_aux != ((old_aux & aux_mask) | aux_val)) {
		pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
			old_aux, (old_aux & aux_mask) | aux_val);
	} else if (aux_mask != ~0U && aux_val != 0) {
		pr_alert("L2C: platform provided aux values match the hardware, so have no effect.  Please remove them.\n");
	}

	/* All L2 caches are unified, so this property should be specified */
	if (!of_property_read_bool(np, "cache-unified"))
		pr_err("L2C: device tree omits to specify unified cache\n");

	if (of_property_read_u32(np, "cache-level", &cache_level))
		pr_err("L2C: device tree omits to specify cache-level\n");

	if (cache_level != 2)
		pr_err("L2C: device tree specifies invalid cache level\n");

	nosync = of_property_read_bool(np, "arm,outer-sync-disable");

	/* Read back current (default) hardware configuration */
	if (data->save)
		data->save(l2x0_base);

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	return __l2c_init(data, aux_val, aux_mask, cache_id, nosync);
}
#endif